docs: Clean up README.md

Author: Vegard Hagen
Date: 2024-04-05 22:56:38 +02:00
Committer: Vegard Stenhjem Hagen
Parent: e343d41b85
Commit: b6b64ab4c4
16 changed files with 792 additions and 883 deletions


@@ -1,8 +0,0 @@
CNI: Cilium
LoadBalancer: Cilium
Ingress: Traefik
Certs: Cert-manager
CD: ArgoCD
Monitoring: Prometheus
Observability: Grafana
Secrets: Bitnami Sealed Secrets


@@ -1,173 +0,0 @@
# Proxmox config
https://github.com/tteck/Proxmox
```shell
bash -c "$(wget -qLO - https://github.com/tteck/Proxmox/raw/main/misc/post-pve-install.sh)"
```
```shell
bash -c "$(wget -qLO - https://github.com/tteck/Proxmox/raw/main/misc/microcode.sh)"
```
https://pve.proxmox.com/wiki/PCI_Passthrough#Verifying_IOMMU_parameters
https://pve.proxmox.com/pve-docs/pve-admin-guide.html#sysboot_edit_kernel_cmdline
https://www.reddit.com/r/homelab/comments/18jx15t/trouble_with_enabling_iommu_pcie_passthrough_81/kdnlyhd/
```shell
root@gauss:~# update-grub
Generating grub configuration file ...
W: This system is booted via proxmox-boot-tool:
W: Executing 'update-grub' directly does not update the correct configs!
W: Running: 'proxmox-boot-tool refresh'
```
This means the system boots via `proxmox-boot-tool`, so instead of editing the GRUB config you should edit `/etc/kernel/cmdline` and add
```shell
intel_iommu=on
```
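For reference, `/etc/kernel/cmdline` is a single line of kernel parameters. A sketch of what it might look like (the `root=` part below is only an example and will differ per system):
```shell
# Example /etc/kernel/cmdline contents (one line):
#   root=ZFS=rpool/ROOT/pve-1 boot=zfs intel_iommu=on iommu=pt
# Apply the change:
proxmox-boot-tool refresh
```
After a reboot, verify that IOMMU is enabled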
```shell
dmesg | grep -e DMAR -e IOMMU
...
DMAR: IOMMU enabled
```
For Nvidia GPUs, blacklist the host drivers
```shell
echo "blacklist nouveau" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nvidia*" >> /etc/modprobe.d/blacklist.conf
```
For Intel iGPUs, blacklist the host driver
```shell
echo "blacklist i915" >> /etc/modprobe.d/blacklist.conf
```
List which PCI devices can be passed through
```shell
pvesh get /nodes/<NODE_NAME>/hardware/pci --pci-class-blacklist ""
```
https://3os.org/infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#linux-virtual-machine-igpu-passthrough-configuration
In the guest VM, verify that the GPU shows up
```shell
sudo lspci -nnv | grep VGA
```
Alternatively, pass through a mapped device
https://pve.proxmox.com/pve-docs/pve-admin-guide.html#resource_mapping
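With a resource mapping in place the VM can reference the device by name instead of a raw PCI ID. A hedged example, assuming a mapping named `iGPU` and VM 100:
```shell
# Attach the mapped device as hostpci0 (pcie=1 requires a q35 machine type)
qm set 100 --hostpci0 mapping=iGPU,pcie=1
```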
## Pass through Disk
https://pve.proxmox.com/wiki/Passthrough_Physical_Disk_to_Virtual_Machine_(VM)
```shell
apt install lshw
```
```shell
lsblk |awk 'NR==1{print $0" DEVICE-ID(S)"}NR>1{dev=$1;printf $0" ";system("find /dev/disk/by-id -lname \"*"dev"\" -printf \" %p\"");print "";}'|grep -v -E 'part|lvm'
```
```shell
veh@gauss:~$ lsblk |awk 'NR==1{print $0" DEVICE-ID(S)"}NR>1{dev=$1;printf $0" ";system("find /dev/disk/by-id -lname \"*"dev"\" -printf \" %p\"");print "";}'|grep -v -E 'part|lvm'
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT DEVICE-ID(S)
sda 8:0 0 476.9G 0 disk /dev/disk/by-id/ata-ADATA_SSD_SX900_512GB-DL2_7E5020000320 /dev/disk/by-id/wwn-0x5707c1800009389f
sh: 1: Syntax error: EOF in backquote substitution
sdb 8:16 0 12.7T 0 disk /var/lib/kubelet/pods/19ca1c6d-014b-4941-9df9-31ad06e6d0c3/volumes/kubernetes.io~local-volume/plex-media-pv /dev/disk/by-id/ata-WDC_WD140EFGX-68B0GN0_Y6G2TE5C /dev/disk/by-id/wwn-0x5000cca2adc1446e
sdc 8:32 0 1.8T 0 disk /dev/disk/by-id/ata-WDC_WD20EFRX-68EUZN0_WD-WCC4M1DPTXE7 /dev/disk/by-id/wwn-0x50014ee2bafd4fac
sh: 1: Syntax error: EOF in backquote substitution
sr0 11:0 1 1024M 0 rom /dev/disk/by-id/ata-PLDS_DVD+_-RW_DS-8ABSH_9F42J736394B653H4A02
nvme0n1 259:0 0 931.5G 0 disk /dev/disk/by-id/nvme-WD_BLACK_SN770_1TB_23413H401146 /dev/disk/by-id/nvme-eui.e8238fa6bf530001001b444a414eafc0
sh: 1: Syntax error: EOF in backquote substitution
```
```shell
qm set 100 -scsi2 /dev/disk/by-id/ata-WDC_WD20EFRX-68EUZN0_WD-WCC4M1DPTXE7
...
update VM 100: -scsi2 /dev/disk/by-id/ata-WDC_WD20EFRX-68EUZN0_WD-WCC4M1DPTXE7
```
```shell
qm set 100 -scsi3 /dev/disk/by-id/ata-WDC_WD140EFGX-68B0GN0_Y6G2TE5C
```
```shell
sdc 8:32 0 1.8T 0 disk
|-sdc1 8:33 0 512G 0 part /disk/etc
`-sdc2 8:34 0 1.3T 0 part /disk/var
```
```shell
veh@gauss:~$ cat /etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# systemd generates mount units based on this file, see systemd.mount(5).
# Please run 'systemctl daemon-reload' after making changes here.
#
# <file system> <mount point> <type> <options> <dump> <pass>
# / was on /dev/sda1 during installation
UUID=6116ff41-36cf-43cc-81c2-3b76a6586c68 / ext4 errors=remount-ro 0 1
# /home was on /dev/sda7 during installation
UUID=c9355084-506e-4bfc-81eb-b20833175f0c /home ext4 defaults 0 2
# /tmp was on /dev/sda6 during installation
UUID=025b6fcd-713d-4954-81dc-99c0fa7785c9 /tmp ext4 defaults 0 2
# /var was on /dev/sda5 during installation
UUID=632f8ab8-794d-4d5b-870a-2138c64fb22a /var ext4 defaults 0 2
/dev/sr0 /media/cdrom0 udf,iso9660 user,noauto 0 0
UUID=2ee1ed03-6306-442a-80b6-c581dfc135d0 /disk/data ext4 defaults 0 2
UUID=e909c1e9-d7ab-4bfa-9ffc-fd24189d7ac6 /disk/etc ext4 defaults 0 2
UUID=8b7d130b-87f8-40f9-b25a-48a5c1e41dbd /disk/var ext4 defaults 0 2
```
```shell
veh@gauss:~$ sudo blkid
/dev/nvme0n1p2: UUID="5B5B-D058" BLOCK_SIZE="512" TYPE="vfat" PARTUUID="705665bc-7474-4797-80cf-352fb4fd26cd"
/dev/nvme0n1p3: LABEL="rpool" UUID="3507575724543500591" UUID_SUB="13907707580269482486" BLOCK_SIZE="4096" TYPE="zfs_member" PARTUUID="832bb88c-ef55-47b9-a539-dffb8a39f046"
/dev/sdb: UUID="2ee1ed03-6306-442a-80b6-c581dfc135d0" BLOCK_SIZE="4096" TYPE="ext4"
/dev/sda1: UUID="6116ff41-36cf-43cc-81c2-3b76a6586c68" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-01"
/dev/sda5: UUID="632f8ab8-794d-4d5b-870a-2138c64fb22a" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-05"
/dev/sda6: UUID="025b6fcd-713d-4954-81dc-99c0fa7785c9" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-06"
/dev/sda7: UUID="c9355084-506e-4bfc-81eb-b20833175f0c" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-07"
/dev/sdc1: UUID="e909c1e9-d7ab-4bfa-9ffc-fd24189d7ac6" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="9261854f-1c03-ce47-b9df-417d7c48b7d9"
/dev/sdc2: UUID="8b7d130b-87f8-40f9-b25a-48a5c1e41dbd" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="8ef5bcde-692a-1e42-bcec-62338fd25f58"
/dev/nvme0n1p1: PARTUUID="4c3a80fe-2a31-4d90-b700-25879c905187"
```
Example VM creation with an EFI disk and a cloud-init drive
```shell
qm create 106 \
--name deb-106 \
--agent 1 \
--memory 4096 \
--bios ovmf \
--sockets 1 --cores 4 \
--cpu host \
--net0 virtio,bridge=vmbr0 \
--scsihw virtio-scsi-single \
--boot order='scsi0' \
--efidisk0 local-lvm:0 \
--ide0 local-lvm:cloudinit \
--machine q35
```
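The command above only attaches an empty EFI disk and a cloud-init drive. A sketch of typical next steps (the SSH key path and DHCP setting are assumptions):
```shell
# Seed cloud-init with an SSH key and DHCP networking, then boot
qm set 106 --sshkeys ~/.ssh/id_ed25519.pub --ipconfig0 ip=dhcp
qm start 106
```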
## OpenTofu/Terraform
https://opentofu.org/
https://registry.terraform.io/providers/bpg/proxmox/latest/docs
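A minimal sketch of the workflow, assuming the provider configuration lives in one of the `machines` folders:
```shell
cd machines/euclid
tofu init        # downloads the bpg/proxmox provider
tofu plan -out plan
tofu apply plan
```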
## PN42 - k8s
```shell
sudo kubeadm init --skip-phases=addon/kube-proxy
```

README.md

@@ -1,340 +1,82 @@
<div align="center">

<img src="https://raw.githubusercontent.com/vehagn/homelab/main/docs/assets/kubernetes.svg" width="144px" alt="Kubernetes logo"/>

# 🪨 Kubernetes Homelab 🏡

</div>

---

## 📝 Overview

This is the [IaC](https://en.wikipedia.org/wiki/Infrastructure_as_code) configuration for my homelab.
It's mainly powered by [Kubernetes](https://kubernetes.io/) and I do my best to adhere to GitOps practices.

To organise all the configuration I've opted for an approach using Kustomized Helm with Argo CD, which I've explained
in more detail [here](https://blog.stonegarden.dev/articles/2023/09/argocd-kustomize-with-helm/).

I try to journal my adventures and exploits on my [blog](https://blog.stonegarden.dev), which is hosted by this repo.

## 🧑‍💻 Getting Started

If you're new to Kubernetes I've written a fairly thorough guide
on [Bootstrapping k3s with Cilium](https://blog.stonegarden.dev/articles/2024/02/bootstrapping-k3s-with-cilium/).
In the article I try to guide you from a fresh Debian 12 Bookworm install to a working cluster using
the [k3s](https://k3s.io) flavour of Kubernetes with [Cilium](https://cilium.io) as a [CNI](https://www.cni.dev)
and [IngressController](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/).

I've also written an article on how to get started
with [Kubernetes on Proxmox](https://blog.stonegarden.dev/articles/2024/03/proxmox-k8s-with-cilium/) if virtualisation
is more your thing.

A third option is the [Quickstart](docs/QUICKSTART.md) in the docs folder.

I also have a ["mini-cluster" repo](https://gitlab.com/vehagn/mini-homelab) over at GitLab, which might be easier to
start understanding.

## ⚙️ Core Components

* [Argo CD](https://argo-cd.readthedocs.io/en/stable/): Declarative, GitOps continuous delivery tool for Kubernetes.
* [Cert-manager](https://cert-manager.io/): Cloud native certificate management.
* [Cilium](https://cilium.io/): eBPF-based networking, observability, and security.
* [OpenTofu](https://opentofu.org/): The open source infrastructure as code tool.
* [Sealed-secrets](https://github.com/bitnami-labs/sealed-secrets): Encrypt your Secret into a SealedSecret, which is
  safe to store - even inside a public repository.

## 📂 Folder Structure

* `apps`: Different applications that I run in the cluster.
* `charts`: Tailor-made Helm charts for this cluster.
* `docs`: Supplementary documentation.
* `infra`: Configuration for core infrastructure components.
* `machines`: OpenTofu/Terraform configuration. Each subfolder is a physical machine.
* `sets`: Argo CD Applications that point to the `apps` and `infra` folders for automatic Git syncing.

## 🖥️ Hardware

| Name   | Device                    | CPU             | RAM            | Storage    | Purpose |
|--------|---------------------------|-----------------|----------------|------------|---------|
| Gauss  | Dell Precision Tower 5810 | Xeon E5-1650 v3 | 64 GB DDR4 ECC | 14 TiB HDD | -       |
| Euclid | ASUS ExpertCenter PN42    | Intel N100      | 32 GB DDR4     | -          | -       |

## 🏗️ Work in Progress

- [ ] Clean up DNS config
- [ ] Renovate for automatic updates
- [ ] Build a NAS for storage
- [ ] Template Gauss
- [ ] Replace Pi-hole with AdGuard Home
- [ ] Use the iGPU on Euclid for video transcoding
- [ ] Replace Traefik with the Cilium Ingress Controller
- [ ] Cilium mTLS & SPIFFE/SPIRE

## 👷 Future Projects
- [ ] Use Talos instead of Debian for Kubernetes
- [ ] Keycloak for auth
- [ ] Dynamic Resource Allocation for GPU
- [ ] Local LLM
- [ ] pfSense
- [ ] Use NetBird or Tailscale
- [ ] Use BGP instead of ARP


@@ -1,10 +0,0 @@
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/
https://kubernetes.io/docs/concepts/services-networking/service/
https://docs.cilium.io/en/stable/
https://github.com/bitnami-labs/sealed-secrets#usage
https://doc.traefik.io/traefik/v2.8/user-guides/crd-acme/
https://www.smarthomebeginner.com/traefik-forward-auth-google-oauth-2022/

docs/QUICKSTART.md (new file)

@@ -0,0 +1,338 @@
# Quickstart using kubeadm
## Debian 12 Bookworm
Enable `sudo` for the user
```shell
~$ su -
~# usermod -aG sudo <user>
~# apt install sudo
~# exit
~$ exit
```
Enable `ssh` on server
```shell
sudo apt install openssh-server
```
On client
```shell
ssh-copy-id <user>@<ip>
```
Harden `ssh` server
```shell
echo "PermitRootLogin no" | sudo tee /etc/ssh/sshd_config.d/01-disable-root-login.conf
echo "PasswordAuthentication no" | sudo tee /etc/ssh/sshd_config.d/02-disable-password-auth.conf
echo "ChallengeResponseAuthentication no" | sudo tee /etc/ssh/sshd_config.d/03-disable-challenge-response-auth.conf
echo "UsePAM no" | sudo tee /etc/ssh/sshd_config.d/04-disable-pam.conf
sudo systemctl reload ssh
```
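To check that the overrides took effect, `sshd -T` prints the resolved configuration:
```shell
sudo sshd -T | grep -E 'permitrootlogin|passwordauthentication|usepam'
```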
## Install prerequisites
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
Install tools needed for adding the Kubernetes repository
```shell
sudo apt update
sudo apt install -y apt-transport-https ca-certificates curl gpg
```
Add the Kubernetes repository signing key and package repo
```shell
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
```
Install kubelet, kubeadm and kubectl
```shell
sudo apt update
sudo apt install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
```
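Verify the installed versions
```shell
kubeadm version -o short
kubectl version --client
```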
Kubelet ≥ 1.26 requires containerd ≥ 1.6.0.
```shell
sudo apt install -y runc containerd
```
## Config
### Disable swap
Disable swap for kubelet to work properly
```shell
sudo swapoff -a
```
Comment out swap in `/etc/fstab` to disable swap on boot
```shell
sudo sed -e '/swap/ s/^#*/#/' -i /etc/fstab
```
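Verify that no swap is active; the `Swap` row should be all zeros
```shell
free -h
```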
### Forwarding IPv4 and letting iptables see bridged traffic
https://kubernetes.io/docs/setup/production-environment/container-runtimes/#install-and-configure-prerequisites
```shell
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
```
```shell
sudo modprobe overlay
sudo modprobe br_netfilter
```
Persist `sysctl` params across reboot
```shell
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
```
Apply `sysctl` params without reboot
```shell
sudo sysctl --system
```
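Verify that the modules are loaded and the params are set
```shell
lsmod | grep -e overlay -e br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
```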
### Containerd cgroups
Generate default config
```shell
containerd config default | sudo tee /etc/containerd/config.toml
```
https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd-systemd
Configure the `systemd` cgroup driver for containerd
```shell
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
```
Restart containerd
```shell
sudo systemctl restart containerd
```
## Initialise cluster
We are going to use Cilium in place of kube-proxy
https://docs.cilium.io/en/v1.12/gettingstarted/kubeproxy-free/
```shell
sudo kubeadm init --skip-phases=addon/kube-proxy
```
## Set up kubectl
https://kubernetes.io/docs/tasks/tools/
```shell
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
```
For remote `kubectl` access, copy the config file to your local machine
```shell
scp <USER>@<IP>:/home/<USER>/.kube/config ~/.kube/config
```
## (Optional) Remove taint for single node use
Get taints on nodes
```shell
kubectl get nodes -o json | jq '.items[].spec.taints'
```
Remove taint on master node to allow scheduling of all deployments
```shell
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
```
## Install Cilium as CNI (Container Network Interface)
To bootstrap the cluster we can install Cilium using its namesake CLI.
For Linux this can be done by running
```shell
CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CLI_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
```
See the [Cilium official docs](https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/) for more options.
Next we install Cilium in kube-proxy replacement mode and enable L2 announcements so it can reply to ARP requests.
To avoid rate limiting while doing L2 announcements we also increase the Kubernetes client rate limits.
```shell
cilium install \
--set kubeProxyReplacement=true \
--set l2announcements.enabled=true \
--set externalIPs.enabled=true \
--set k8sClientRateLimit.qps=50 \
--set k8sClientRateLimit.burst=100
```
See [this blog post](https://blog.stonegarden.dev/articles/2023/12/migrating-from-metallb-to-cilium/#l2-announcements)
for more details.
Validate the installation
```shell
cilium status
```
## Cilium LB IPAM
For [Cilium to act as a load balancer](https://docs.cilium.io/en/stable/network/lb-ipam/) and start assigning IPs
to `LoadBalancer` `Service` resources we need to create a `CiliumLoadBalancerIPPool` with a valid pool.
Edit the CIDR range to fit your network before applying it
```shell
kubectl apply -f infra/cilium/ip-pool.yaml
```
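For reference, a minimal pool could look something like the sketch below; the name and CIDR are placeholders, see `infra/cilium/ip-pool.yaml` for the real manifest
```shell
cat <<EOF | kubectl apply -f -
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: default-pool
spec:
  cidrs:
    - cidr: 192.168.1.224/27
EOF
```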
Next create a `CiliumL2AnnouncementPolicy` to announce the assigned IPs.
Leaving the `interfaces` field empty announces on all interfaces.
```shell
kubectl apply -f infra/cilium/announce.yaml
```
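As a sketch (see `infra/cilium/announce.yaml` for the real manifest), a policy announcing both external and load balancer IPs on all interfaces might look like
```shell
cat <<EOF | kubectl apply -f -
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: announce-all
spec:
  externalIPs: true
  loadBalancerIPs: true
EOF
```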
## Sealed Secrets
Sealed Secrets is used to create encrypted secrets that are safe to store in Git
```shell
kubectl apply -k infra/sealed-secrets
```
Be sure to store the generated sealed secret key in a safe place!
```shell
kubectl -n kube-system get secrets
```
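One way to back up the sealing key, using the label the sealed-secrets controller puts on its active key:
```shell
kubectl -n kube-system get secret \
  -l sealedsecrets.bitnami.com/sealed-secrets-key=active \
  -o yaml > sealed-secrets-key.backup.yaml
```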
*NB!*: There will be errors if you use my sealed secrets as you (hopefully) don't have the decryption key
## Gateway API
Install the experimental-channel Gateway API CRDs
```shell
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/experimental-install.yaml
```
## Cert-manager
Install cert-manager using Kustomized Helm
```shell
kubectl kustomize --enable-helm infra/cert-manager | kubectl apply -f -
```
## Traefik
Change the `io.cilium/lb-ipam-ips` annotation in `infra/traefik/values.yaml` to a valid IP address for your network.
Install Traefik
```shell
kubectl kustomize --enable-helm infra/traefik | kubectl apply -f -
```
## Port forward Traefik
In your router, forward port 80 to Traefik's port 8000 for HTTP and port 443 to 4443 for HTTPS.
The Traefik service IP can be found with `kubectl get svc` (it should be the same as the one you set in the annotation).
## Test-application (Optional)
Deploy a test-application by editing the manifests in `apps/test/whoami` and applying them
```shell
kubectl apply -k apps/test/whoami
```
An unsecured test-application `whoami` should be available at [https://test.${DOMAIN}](https://test.${DOMAIN}).
If you configured `apps/test/whoami/traefik-forward-auth` correctly a secured version should be available
at [https://whoami.${DOMAIN}](https://whoami.${DOMAIN}).
## Argo CD
[ArgoCD](https://argo-cd.readthedocs.io/en/stable/getting_started/) is used to bootstrap the rest of the cluster.
The cluster uses a combination of Helm and Kustomize to configure infrastructure and applications.
For more details, read [this blog post](https://blog.stonegarden.dev/articles/2023/09/argocd-kustomize-with-helm/).
```shell
kubectl kustomize --enable-helm infra/argocd | kubectl apply -f -
```
Get the initial Argo CD admin password by running
```shell
kubectl -n argocd get secrets argocd-initial-admin-secret -o json | jq -r .data.password | base64 -d
```
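Until an Ingress is configured the UI can be reached with a port-forward, assuming the default `argocd-server` service name:
```shell
kubectl -n argocd port-forward svc/argocd-server 8080:443
```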
## Kubernetes Dashboard
An OIDC (traefik-forward-auth)
protected [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) can be
deployed using
```shell
kubectl apply -k infra/dashboard
```
Create a token
```shell
kubectl -n kubernetes-dashboard create token admin-user
```
## ApplicationSets
*NB!*: This will not work until you've changed all the domain names and IP addresses to fit your setup.
Once you've tested everything, get the ball rolling with
```shell
kubectl apply -k sets
```
## Cleanup
```shell
kubectl drain gauss --delete-emptydir-data --force --ignore-daemonsets
sudo kubeadm reset
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
```

(Image diff suppressed: new image file, 11 KiB.)


@@ -1,86 +0,0 @@
#cloud-config
users:
  - name: ${username}
    groups:
      - sudo
    shell: /bin/bash
    ssh_authorized_keys:
      - ${pub-key}
    sudo: ALL=(ALL) NOPASSWD:ALL
network:
  version: 1
  config:
    - type: nameserver
      address:
        - 1.1.1.1
        - 8.8.8.8
hostname: ${hostname}
create_hostname_file: true
package_update: true
package_upgrade: true
locale: en_US.UTF-8
timezone: Europe/Oslo
write_files:
  - path: /etc/modules-load.d/k8s.conf
    content: |
      overlay
      br_netfilter
  - path: /etc/sysctl.d/k8s.conf
    content: |
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      net.ipv4.ip_forward = 1
  # https://serverfault.com/questions/1148659/overwriting-provider-dns-via-cloud-init
  - path: /etc/systemd/resolved.conf.d/dns_servers.conf
    content: |
      [Resolve]
      DNS=1.1.1.1 8.8.8.8
      Domains=~.
    permissions: '0644'
packages:
  - qemu-guest-agent
  - net-tools
  - vim
  - apt-transport-https
  - ca-certificates
  - curl
  - gpg
  - open-iscsi
  - jq
runcmd:
  - systemctl enable qemu-guest-agent
  - systemctl start qemu-guest-agent
  - localectl set-locale LANG=en_US.UTF-8
  - curl -fsSL https://pkgs.k8s.io/core:/stable:/v${k8s-version}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
  - echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${k8s-version}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list
  - apt update
  - apt install -y kubelet kubeadm kubectl
  - apt-mark hold kubelet kubeadm kubectl
  - apt install -y runc containerd
  - containerd config default | tee /etc/containerd/config.toml
  - sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
  - modprobe overlay
  - modprobe br_netfilter
  - sysctl --system
  - systemctl restart containerd
  - systemctl restart systemd-resolved
  - ${kubeadm-cmd}
  - mkdir -p /home/${username}/.kube
  - cp /etc/kubernetes/admin.conf /home/${username}/.kube/config
  - chown -R ${username}:${username} /home/${username}/.kube
  - curl -sfLO --fail https://github.com/cilium/cilium-cli/releases/download/v${cilium-cli-version}/cilium-linux-amd64.tar.gz
  - tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
  - rm cilium-linux-amd64.tar.gz
  - ${cilium-cli-cmd}
power_state:
  delay: now
  mode: reboot
  message: Rebooting after cloud-init completion
  condition: true


@@ -1,29 +1,26 @@
 #cloud-config
 users:
   - name: ${username}
-    groups:
-      - sudo
+    passwd: ${password}
+    lock_passwd: false
+    groups: [ adm, cdrom, dip, plugdev, lxd, sudo ]
     shell: /bin/bash
     ssh_authorized_keys:
       - ${pub-key}
-    sudo: ALL=(ALL) NOPASSWD:ALL
-network:
-  version: 1
-  config:
-    - type: nameserver
-      address:
-        - 1.1.1.1
-        - 8.8.8.8
+    #sudo: ALL=(ALL) NOPASSWD:ALL
 hostname: ${hostname}
 create_hostname_file: true
 package_update: true
 package_upgrade: true
 locale: en_US.UTF-8
 timezone: Europe/Oslo
 write_files:
+  - path: /etc/ssh/sshd_config.d/01-harden-ssh.conf
+    content: |
+      PermitRootLogin no
+      PasswordAuthentication no
+      ChallengeResponseAuthentication no
+      UsePAM no
   - path: /etc/modules-load.d/k8s.conf
     content: |
       overlay
@@ -34,13 +31,6 @@ write_files:
       net.bridge.bridge-nf-call-ip6tables = 1
       net.bridge.bridge-nf-call-iptables = 1
       net.ipv4.ip_forward = 1
-  # https://serverfault.com/questions/1148659/overwriting-provider-dns-via-cloud-init
-  - path: /etc/systemd/resolved.conf.d/dns_servers.conf
-    content: |
-      [Resolve]
-      DNS=1.1.1.1 8.8.8.8
-      Domains=~.
-    permissions: '0644'
 packages:
   - qemu-guest-agent
@@ -69,11 +59,4 @@ runcmd:
   - modprobe br_netfilter
   - sysctl --system
   - systemctl restart containerd
-  - systemctl restart systemd-resolved
   - ${kubeadm-cmd}
-power_state:
-  delay: now
-  mode: reboot
-  message: Rebooting after cloud-init completion
-  condition: true


@@ -0,0 +1,9 @@
#cloud-config
${common-config}
  - mkdir -p /home/${username}/.kube
  - cp /etc/kubernetes/admin.conf /home/${username}/.kube/config
  - chown -R ${username}:${username} /home/${username}/.kube
  - curl -sfLO https://github.com/cilium/cilium-cli/releases/download/v${cilium-cli-version}/cilium-linux-amd64.tar.gz
  - tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
  - rm cilium-linux-amd64.tar.gz
  - ${cilium-cli-cmd}


@@ -0,0 +1,2 @@
#cloud-config
${common-config}


@@ -0,0 +1,58 @@
resource "proxmox_virtual_environment_download_file" "debian_12_generic_image" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
datastore_id = "local"
file_name = "debian-12-generic-amd64-20240201-1644.img"
url = "https://cloud.debian.org/images/cloud/bookworm/20240211-1654/debian-12-generic-amd64-20240211-1654.qcow2"
checksum = "b679398972ba45a60574d9202c4f97ea647dd3577e857407138b73b71a3c3c039804e40aac2f877f3969676b6c8a1ebdb4f2d67a4efa6301c21e349e37d43ef5"
checksum_algorithm = "sha512"
}
# Make sure the "Snippets" content type is enabled on the target datastore in Proxmox before applying the configuration below.
# https://github.com/bpg/terraform-provider-proxmox/blob/main/docs/guides/cloud-init.md
resource "proxmox_virtual_environment_file" "cloud-init-ctrl-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "snippets"
datastore_id = "local"
source_raw {
data = templatefile("./cloud-init/k8s-control-plane.yaml.tftpl", {
common-config = templatefile("./cloud-init/k8s-common.yaml.tftpl", {
hostname = "k8s-ctrl-01"
username = var.vm_user
password = var.vm_password
pub-key = var.host_pub-key
k8s-version = var.k8s-version
kubeadm-cmd = "kubeadm init --skip-phases=addon/kube-proxy"
})
username = var.vm_user
cilium-cli-version = var.cilium-cli-version
cilium-cli-cmd = "HOME=/home/${var.vm_user} KUBECONFIG=/etc/kubernetes/admin.conf cilium install --set kubeProxyReplacement=true"
})
file_name = "cloud-init-k8s-ctrl-01.yaml"
}
}
resource "proxmox_virtual_environment_file" "cloud-init-work-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "snippets"
datastore_id = "local"
source_raw {
data = templatefile("./cloud-init/k8s-worker.yaml.tftpl", {
common-config = templatefile("./cloud-init/k8s-common.yaml.tftpl", {
hostname = "k8s-work-01"
username = var.vm_user
password = var.vm_password
pub-key = var.host_pub-key
k8s-version = var.k8s-version
kubeadm-cmd = module.kubeadm-join.stdout
})
})
file_name = "cloud-init-k8s-work-01.yaml"
}
}


@@ -0,0 +1,108 @@
resource "proxmox_virtual_environment_vm" "k8s-ctrl-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
name = "k8s-ctrl-01"
description = "Kubernetes Control Plane 01"
tags = ["k8s", "control-plane"]
on_boot = true
vm_id = 8001
machine = "q35"
scsi_hardware = "virtio-scsi-single"
bios = "ovmf"
cpu {
cores = 4
type = "host"
}
memory {
dedicated = 4096
}
network_device {
bridge = "vmbr0"
mac_address = "BC:24:11:2E:C0:01"
}
efi_disk {
datastore_id = "local-zfs"
file_format = "raw" // To support qcow2 format
type = "4m"
}
disk {
datastore_id = "local-zfs"
file_id = proxmox_virtual_environment_download_file.debian_12_generic_image.id
interface = "scsi0"
cache = "writethrough"
discard = "on"
ssd = true
size = 32
}
boot_order = ["scsi0"]
agent {
enabled = true
}
operating_system {
type = "l26" # Linux Kernel 2.6 - 6.X.
}
initialization {
dns {
domain = var.vm_dns.domain
servers = var.vm_dns.servers
}
ip_config {
ipv4 {
address = "192.168.1.100/24"
gateway = "192.168.1.1"
}
}
datastore_id = "local-zfs"
user_data_file_id = proxmox_virtual_environment_file.cloud-init-ctrl-01.id
}
}
output "ctrl_01_ipv4_address" {
depends_on = [proxmox_virtual_environment_vm.k8s-ctrl-01]
value = proxmox_virtual_environment_vm.k8s-ctrl-01.ipv4_addresses[1][0]
}
resource "local_file" "ctrl-01-ip" {
content = proxmox_virtual_environment_vm.k8s-ctrl-01.ipv4_addresses[1][0]
filename = "output/ctrl-01-ip.txt"
file_permission = "0644"
}
module "sleep" {
depends_on = [local_file.ctrl-01-ip]
source = "Invicton-Labs/shell-data/external"
version = "0.4.2"
command_unix = "sleep 150"
}
module "kube-config" {
depends_on = [module.sleep]
source = "Invicton-Labs/shell-resource/external"
version = "0.4.1"
command_unix = "ssh -o StrictHostKeyChecking=no ${var.vm_user}@${local_file.ctrl-01-ip.content} cat /home/${var.vm_user}/.kube/config"
}
resource "local_file" "kube-config" {
content = module.kube-config.stdout
filename = "output/config"
file_permission = "0600"
}
module "kubeadm-join" {
depends_on = [local_file.kube-config]
source = "Invicton-Labs/shell-resource/external"
version = "0.4.1"
command_unix = "ssh -o StrictHostKeyChecking=no ${var.vm_user}@${local_file.ctrl-01-ip.content} /usr/bin/kubeadm token create --print-join-command"
}


@@ -0,0 +1,91 @@
resource "proxmox_virtual_environment_vm" "k8s-work-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
name = "k8s-work-01"
description = "Kubernetes Worker 01"
tags = ["k8s", "worker"]
on_boot = true
vm_id = 8101
machine = "q35"
scsi_hardware = "virtio-scsi-single"
bios = "ovmf"
cpu {
cores = 4
type = "host"
}
memory {
dedicated = 8192
}
network_device {
bridge = "vmbr0"
mac_address = "BC:24:11:2E:AE:01"
}
efi_disk {
datastore_id = "local-zfs"
file_format = "raw" // To support qcow2 format
type = "4m"
}
disk {
datastore_id = "local-zfs"
file_id = proxmox_virtual_environment_download_file.debian_12_generic_image.id
interface = "scsi0"
cache = "writethrough"
discard = "on"
ssd = true
size = 32
}
boot_order = ["scsi0"]
agent {
enabled = true
}
operating_system {
type = "l26" # Linux Kernel 2.6 - 6.X.
}
initialization {
dns {
domain = var.vm_dns.domain
servers = var.vm_dns.servers
}
ip_config {
ipv4 {
address = "192.168.1.110/24"
gateway = "192.168.1.1"
}
}
datastore_id = "local-zfs"
user_data_file_id = proxmox_virtual_environment_file.cloud-init-work-01.id
}
hostpci {
# Passthrough iGPU
device = "hostpci0"
#id = "0000:00:02"
mapping = "iGPU"
pcie = true
rombar = true
xvga = false
}
}
output "work_01_ipv4_address" {
depends_on = [proxmox_virtual_environment_vm.k8s-work-01]
value = proxmox_virtual_environment_vm.k8s-work-01.ipv4_addresses[1][0]
}
resource "local_file" "work-01-ip" {
content = proxmox_virtual_environment_vm.k8s-work-01.ipv4_addresses[1][0]
filename = "output/work-01-ip.txt"
file_permission = "0644"
}


@@ -2,7 +2,7 @@ terraform {
   required_providers {
     proxmox = {
       source  = "bpg/proxmox"
-      version = "0.48.2"
+      version = "0.50.0"
     }
   }
 }
@@ -12,12 +12,11 @@ provider "proxmox" {
   endpoint  = var.euclid.endpoint
   insecure  = var.euclid.insecure
-  username  = var.euclid_auth.username
   api_token = var.euclid_auth.api_token
   ssh {
-    agent = var.euclid_auth.agent
+    agent    = true
     username = var.euclid_auth.username
   }
   tmp_dir = "/var/tmp"
 }


@@ -1,5 +1,5 @@
variable "euclid" {
description = "Proxmox server configuration for Euclid machine"
description = "Proxmox server configuration for Euclid"
type = object({
node_name = string
endpoint = string
@@ -8,31 +8,44 @@ variable "euclid" {
}
variable "euclid_auth" {
description = "Auth for euclid proxmox server"
description = "Euclid Proxmox server auth"
type = object({
agent = bool
username = string
api_token = string
})
sensitive = true
}
variable "vm_dns" {
description = "DNS config for VMs"
type = object({
domain = string
servers = list(string)
})
}
variable "vm_user" {
description = "vm username"
description = "VM username"
type = string
}
variable "vm_pub-key" {
description = "vm username"
variable "vm_password" {
description = "VM password"
type = string
sensitive = true
}
variable "host_pub-key" {
description = "Host public key"
type = string
}
variable "k8s-version" {
description = "Kubernetes version"
type = string
type = string
}
variable "cilium-cli-version" {
description = "Cilium CLI version"
type = string
type = string
}


@@ -1,248 +0,0 @@
resource "proxmox_virtual_environment_download_file" "debian_12_generic_image" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
datastore_id = "local"
file_name = "debian-12-generic-amd64-20240201-1644.img"
url = "https://cloud.debian.org/images/cloud/bookworm/20240211-1654/debian-12-generic-amd64-20240211-1654.qcow2"
checksum = "b679398972ba45a60574d9202c4f97ea647dd3577e857407138b73b71a3c3c039804e40aac2f877f3969676b6c8a1ebdb4f2d67a4efa6301c21e349e37d43ef5"
checksum_algorithm = "sha512"
}
# Make sure the "Snippets" content type is enabled on the target datastore in Proxmox before applying the configuration below.
# https://github.com/bpg/terraform-provider-proxmox/blob/main/docs/guides/cloud-init.md
resource "proxmox_virtual_environment_file" "cloud-init-ctrl-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "snippets"
datastore_id = "local"
source_raw {
data = templatefile("./cloud-init/control-plane.yaml", {
hostname = "k8s-ctrl-01"
username = var.vm_user
pub-key = var.vm_pub-key
k8s-version = var.k8s-version
kubeadm-cmd = "kubeadm init --skip-phases=addon/kube-proxy"
cilium-cli-version = var.cilium-cli-version
cilium-cli-cmd = "KUBECONFIG=/etc/kubernetes/admin.conf cilium install --set kubeProxyReplacement=true"
})
file_name = "cloud-init-k8s-ctrl-01.yaml"
}
}
resource "proxmox_virtual_environment_vm" "k8s-ctrl-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
name = "k8s-ctrl-01"
description = "Kubernetes Control Plane 01"
tags = ["k8s", "control-plane"]
on_boot = true
bios = "ovmf"
vm_id = 8001
initialization {
ip_config {
ipv4 {
#address = "dhcp"
address = "192.168.1.100/24"
gateway = "192.168.1.1"
}
}
datastore_id = "local-zfs"
user_data_file_id = proxmox_virtual_environment_file.cloud-init-ctrl-01.id
}
cpu {
cores = 4
type = "host"
}
memory {
dedicated = 4096
}
network_device {
bridge = "vmbr0"
mac_address = "BC:24:11:2E:C0:01"
}
agent {
enabled = true
}
machine = "q35"
scsi_hardware = "virtio-scsi-single"
efi_disk {
datastore_id = "local-zfs"
file_format = "raw" // To support qcow2 format
type = "4m"
}
disk {
datastore_id = "local-zfs"
file_id = proxmox_virtual_environment_download_file.debian_12_generic_image.id
interface = "scsi0"
cache = "writethrough"
discard = "on"
ssd = true
size = 32
}
boot_order = ["scsi0"]
operating_system {
type = "l26" # Linux Kernel 2.6 - 5.X.
}
}
output "ctrl_01_ipv4_address" {
depends_on = [proxmox_virtual_environment_vm.k8s-ctrl-01]
value = proxmox_virtual_environment_vm.k8s-ctrl-01.ipv4_addresses[1][0]
}
resource "local_file" "ctrl-01-ip" {
content = proxmox_virtual_environment_vm.k8s-ctrl-01.ipv4_addresses[1][0]
filename = "output/ctrl-01-ip.txt"
file_permission = "0644"
}
module "sleep" {
depends_on = [local_file.ctrl-01-ip]
source = "Invicton-Labs/shell-data/external"
version = "0.4.2"
command_unix = "sleep 120"
}
module "kube-config" {
depends_on = [module.sleep]
source = "Invicton-Labs/shell-resource/external"
version = "0.4.1"
command_unix = "ssh -o StrictHostKeyChecking=no ${var.vm_user}@${local_file.ctrl-01-ip.content} cat /home/${var.vm_user}/.kube/config"
}
resource "local_file" "kube-config" {
content = module.kube-config.stdout
filename = "output/config"
file_permission = "0600"
}
module "kubeadm-join" {
depends_on = [local_file.kube-config]
source = "Invicton-Labs/shell-resource/external"
version = "0.4.1"
# https://stackoverflow.com/questions/21383806/how-can-i-force-ssh-to-accept-a-new-host-fingerprint-from-the-command-line
command_unix = "ssh -o StrictHostKeyChecking=no ${var.vm_user}@${local_file.ctrl-01-ip.content} /usr/bin/kubeadm token create --print-join-command"
}
resource "proxmox_virtual_environment_file" "cloud-init-work-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "snippets"
datastore_id = "local"
source_raw {
data = templatefile("./cloud-init/worker.yaml", {
hostname = "k8s-work-01"
username = var.vm_user
pub-key = var.vm_pub-key
k8s-version = var.k8s-version
kubeadm-cmd = module.kubeadm-join.stdout
})
file_name = "cloud-init-k8s-work-01.yaml"
}
}
resource "proxmox_virtual_environment_vm" "k8s-work-01" {
provider = proxmox.euclid
node_name = var.euclid.node_name
name = "k8s-work-01"
description = "Kubernetes Worker 01"
tags = ["k8s", "worker"]
on_boot = true
bios = "ovmf"
vm_id = 8101
initialization {
ip_config {
ipv4 {
address = "192.168.1.110/24"
gateway = "192.168.1.1"
}
}
datastore_id = "local-zfs"
user_data_file_id = proxmox_virtual_environment_file.cloud-init-work-01.id
}
cpu {
cores = 4
type = "host"
}
memory {
dedicated = 8192
}
network_device {
bridge = "vmbr0"
mac_address = "BC:24:11:2E:AE:01"
}
agent {
enabled = true
}
machine = "q35"
scsi_hardware = "virtio-scsi-single"
efi_disk {
datastore_id = "local-zfs"
file_format = "raw" // To support qcow2 format
type = "4m"
}
disk {
datastore_id = "local-zfs"
file_id = proxmox_virtual_environment_download_file.debian_12_generic_image.id
interface = "scsi0"
cache = "writethrough"
discard = "on"
ssd = true
size = 32
}
boot_order = ["scsi0"]
operating_system {
type = "l26" # Linux Kernel 2.6 - 5.X.
}
hostpci {
# Passthrough iGPU
device = "hostpci0"
#id = "0000:00:02"
mapping = "iGPU"
pcie = true
rombar = true
xvga = false
}
}
output "work_01_ipv4_address" {
depends_on = [proxmox_virtual_environment_vm.k8s-work-01]
value = proxmox_virtual_environment_vm.k8s-work-01.ipv4_addresses[1][0]
}
resource "local_file" "work-01-ip" {
content = proxmox_virtual_environment_vm.k8s-work-01.ipv4_addresses[1][0]
filename = "output/work-01-ip.txt"
file_permission = "0644"
}