feat(vm): Start to use Proxmox

This commit is contained in:
Vegard Hagen
2024-02-17 19:00:41 +01:00
committed by Vegard Stenhjem Hagen
parent 25692fd11f
commit d035bec693
8 changed files with 704 additions and 42 deletions

169
PROXMOX.md Normal file
View File

@@ -0,0 +1,169 @@
# Proxmox config
https://github.com/tteck/Proxmox
```shell
bash -c "$(wget -qLO - https://github.com/tteck/Proxmox/raw/main/misc/post-pve-install.sh)"
```
```shell
bash -c "$(wget -qLO - https://github.com/tteck/Proxmox/raw/main/misc/microcode.sh)"
```
https://pve.proxmox.com/wiki/PCI_Passthrough#Verifying_IOMMU_parameters
https://pve.proxmox.com/pve-docs/pve-admin-guide.html#sysboot_edit_kernel_cmdline
https://www.reddit.com/r/homelab/comments/18jx15t/trouble_with_enabling_iommu_pcie_passthrough_81/kdnlyhd/
```shell
root@gauss:~# update-grub
Generating grub configuration file ...
W: This system is booted via proxmox-boot-tool:
W: Executing 'update-grub' directly does not update the correct configs!
W: Running: 'proxmox-boot-tool refresh'
```
This means kernel command-line changes must be made in `/etc/kernel/cmdline` (and applied with `proxmox-boot-tool refresh`);
add
```shell
intel_iommu=on
```
```shell
dmesg | grep -e DMAR -e IOMMU
...
DMAR: IOMMU enabled
```
Nvidia
```shell
echo "blacklist nouveau" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nvidia*" >> /etc/modprobe.d/blacklist.conf
```
Intel
```shell
echo "blacklist i915" >> /etc/modprobe.d/blacklist.conf
```
```shell
pvesh get /nodes/<NODE_NAME>/hardware/pci --pci-class-blacklist ""
```
https://3os.org/infrastructure/proxmox/gpu-passthrough/igpu-passthrough-to-vm/#linux-virtual-machine-igpu-passthrough-configuration
```shell
sudo lspci -nnv | grep VGA
```
## Pass through Disk
https://pve.proxmox.com/wiki/Passthrough_Physical_Disk_to_Virtual_Machine_(VM)
```shell
apt install lshw
```
```shell
lsblk |awk 'NR==1{print $0" DEVICE-ID(S)"}NR>1{dev=$1;printf $0" ";system("find /dev/disk/by-id -lname \"*"dev"\" -printf \" %p\"");print "";}'|grep -v -E 'part|lvm'
```
```shell
veh@gauss:~$ lsblk |awk 'NR==1{print $0" DEVICE-ID(S)"}NR>1{dev=$1;printf $0" ";system("find /dev/disk/by-id -lname \"*"dev"\" -printf \" %p\"");print "";}'|grep -v -E 'part|lvm'
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT DEVICE-ID(S)
sda 8:0 0 476.9G 0 disk /dev/disk/by-id/ata-ADATA_SSD_SX900_512GB-DL2_7E5020000320 /dev/disk/by-id/wwn-0x5707c1800009389f
sh: 1: Syntax error: EOF in backquote substitution
sdb 8:16 0 12.7T 0 disk /var/lib/kubelet/pods/19ca1c6d-014b-4941-9df9-31ad06e6d0c3/volumes/kubernetes.io~local-volume/plex-media-pv /dev/disk/by-id/ata-WDC_WD140EFGX-68B0GN0_Y6G2TE5C /dev/disk/by-id/wwn-0x5000cca2adc1446e
sdc 8:32 0 1.8T 0 disk /dev/disk/by-id/ata-WDC_WD20EFRX-68EUZN0_WD-WCC4M1DPTXE7 /dev/disk/by-id/wwn-0x50014ee2bafd4fac
sh: 1: Syntax error: EOF in backquote substitution
sr0 11:0 1 1024M 0 rom /dev/disk/by-id/ata-PLDS_DVD+_-RW_DS-8ABSH_9F42J736394B653H4A02
nvme0n1 259:0 0 931.5G 0 disk /dev/disk/by-id/nvme-WD_BLACK_SN770_1TB_23413H401146 /dev/disk/by-id/nvme-eui.e8238fa6bf530001001b444a414eafc0
sh: 1: Syntax error: EOF in backquote substitution
```
```shell
qm set 100 -scsi2 /dev/disk/by-id/ata-WDC_WD20EFRX-68EUZN0_WD-WCC4M1DPTXE7
...
update VM 100: -scsi2 /dev/disk/by-id/ata-WDC_WD20EFRX-68EUZN0_WD-WCC4M1DPTXE7
```
```shell
qm set 100 -scsi3 /dev/disk/by-id/ata-WDC_WD140EFGX-68B0GN0_Y6G2TE5C
```
```shell
sdc 8:32 0 1.8T 0 disk
|-sdc1 8:33 0 512G 0 part /disk/etc
`-sdc2 8:34 0 1.3T 0 part /disk/var
```
```shell
veh@gauss:~$ cat /etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# systemd generates mount units based on this file, see systemd.mount(5).
# Please run 'systemctl daemon-reload' after making changes here.
#
# <file system> <mount point> <type> <options> <dump> <pass>
# / was on /dev/sda1 during installation
UUID=6116ff41-36cf-43cc-81c2-3b76a6586c68 / ext4 errors=remount-ro 0 1
# /home was on /dev/sda7 during installation
UUID=c9355084-506e-4bfc-81eb-b20833175f0c /home ext4 defaults 0 2
# /tmp was on /dev/sda6 during installation
UUID=025b6fcd-713d-4954-81dc-99c0fa7785c9 /tmp ext4 defaults 0 2
# /var was on /dev/sda5 during installation
UUID=632f8ab8-794d-4d5b-870a-2138c64fb22a /var ext4 defaults 0 2
/dev/sr0 /media/cdrom0 udf,iso9660 user,noauto 0 0
UUID=2ee1ed03-6306-442a-80b6-c581dfc135d0 /disk/data ext4 defaults 0 2
UUID=e909c1e9-d7ab-4bfa-9ffc-fd24189d7ac6 /disk/etc ext4 defaults 0 2
UUID=8b7d130b-87f8-40f9-b25a-48a5c1e41dbd /disk/var ext4 defaults 0 2
```
```shell
veh@gauss:~$ sudo blkid
/dev/nvme0n1p2: UUID="5B5B-D058" BLOCK_SIZE="512" TYPE="vfat" PARTUUID="705665bc-7474-4797-80cf-352fb4fd26cd"
/dev/nvme0n1p3: LABEL="rpool" UUID="3507575724543500591" UUID_SUB="13907707580269482486" BLOCK_SIZE="4096" TYPE="zfs_member" PARTUUID="832bb88c-ef55-47b9-a539-dffb8a39f046"
/dev/sdb: UUID="2ee1ed03-6306-442a-80b6-c581dfc135d0" BLOCK_SIZE="4096" TYPE="ext4"
/dev/sda1: UUID="6116ff41-36cf-43cc-81c2-3b76a6586c68" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-01"
/dev/sda5: UUID="632f8ab8-794d-4d5b-870a-2138c64fb22a" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-05"
/dev/sda6: UUID="025b6fcd-713d-4954-81dc-99c0fa7785c9" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-06"
/dev/sda7: UUID="c9355084-506e-4bfc-81eb-b20833175f0c" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="7358989f-07"
/dev/sdc1: UUID="e909c1e9-d7ab-4bfa-9ffc-fd24189d7ac6" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="9261854f-1c03-ce47-b9df-417d7c48b7d9"
/dev/sdc2: UUID="8b7d130b-87f8-40f9-b25a-48a5c1e41dbd" BLOCK_SIZE="4096" TYPE="ext4" PARTUUID="8ef5bcde-692a-1e42-bcec-62338fd25f58"
/dev/nvme0n1p1: PARTUUID="4c3a80fe-2a31-4d90-b700-25879c905187"
```
```shell
qm create 106 \
--name deb-106 \
--agent 1 \
--memory 4096 \
--bios ovmf \
--sockets 1 --cores 4 \
--cpu host \
--net0 virtio,bridge=vmbr0 \
--scsihw virtio-scsi-single \
--boot order='scsi0' \
--efidisk0 local-lvm:0 \
--ide0 local-lvm:cloudinit \
--machine q35
```
## OpenTofu/Terraform
https://opentofu.org/
https://registry.terraform.io/providers/bpg/proxmox/latest/docs
## PN42 - k8s
```shell
sudo kubeadm init --skip-phases=addon/kube-proxy
```

185
README.md
View File

@@ -1,27 +1,143 @@
# Setup cluster with kubeadm
Disable swap for kubelet to work properly
## Proxmox (optional)
## Debian 12 Bookworm
Enable `sudo` for the user
```shell
swapoff -a
~$ su -
~# usermod -aG sudo <user>
~# apt install sudo
~# exit
~$ exit
```
Enable `ssh` on server
```shell
sudo apt install openssh-server
```
On client
```shell
ssh-copy-id <user>@<ip>
```
Harden `ssh` server
```shell
echo "PermitRootLogin no" | sudo tee /etc/ssh/sshd_config.d/01-disable-root-login.conf
echo "PasswordAuthentication no" | sudo tee /etc/ssh/sshd_config.d/02-disable-password-auth.conf
echo "ChallengeResponseAuthentication no" | sudo tee /etc/ssh/sshd_config.d/03-disable-challenge-response-auth.conf
echo "UsePAM no" | sudo tee /etc/ssh/sshd_config.d/04-disable-pam.conf
sudo systemctl reload ssh
```
## Install prerequisites
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
Install required tools
```shell
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
sudo apt update
sudo apt install -y apt-transport-https ca-certificates curl gpg
```
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
Add key and repo
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
```shell
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
```
sudo apt-get update
sudo apt-get install -y containerd conntrack socat kubelet kubeadm kubectl
Install kubelet, kubeadm and kubectl
```shell
sudo apt update
sudo apt install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
```
Kubelet ≥ 1.26 requires containerd ≥ 1.6.0.
```shell
sudo apt install -y runc containerd
```
## Config
### Disable swap
Disable swap for kubelet to work properly
```shell
sudo swapoff -a
```
Comment out swap in `/etc/fstab` to disable swap on boot
```shell
sudo sed -e '/swap/ s/^#*/#/' -i /etc/fstab
```
### Forwarding IPv4 and letting iptables see bridged traffic
https://kubernetes.io/docs/setup/production-environment/container-runtimes/#install-and-configure-prerequisites
```shell
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
```
```shell
sudo modprobe overlay
sudo modprobe br_netfilter
```
Persist `sysctl` params across reboot
```shell
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
```
Apply `sysctl` params without reboot
```shell
sudo sysctl --system
```
### containerd cgroups
Generate default config
```shell
containerd config default | sudo tee /etc/containerd/config.toml
```
https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd-systemd
Configure the `systemd` cgroup driver for containerd
```shell
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
```
Restart containerd
```shell
sudo systemctl restart containerd
```
## Initialise cluster
We are going to use cilium in place of kube-proxy
@@ -44,7 +160,7 @@ sudo chown $(id -u):$(id -g) $HOME/.kube/config
For remote kubectl copy the config file to local machine
```shell
scp gauss@192.168.1.12:/home/gauss/.kube/config ~/.kube/config
scp veh@192.168.1.50:/home/veh/.kube/config ~/.kube/config
```
## (Optional) Remove taint for single node use
@@ -108,14 +224,14 @@ to `LoadBalancer` `Service` resources we need to create a `CiliumLoadBalancerIPP
Edit the cidr range to fit your network before applying it
```shell
kubectl apply infra/cilium/ip-pool.yaml
kubectl apply -f infra/cilium/ip-pool.yaml
```
Next create a `CiliumL2AnnouncementPolicy` to announce the assigned IPs.
Leaving the `interfaces` field empty announces on all interfaces.
```shell
kubectl apply infra/cilium/announce.yaml
kubectl apply -f infra/cilium/announce.yaml
```
# Sealed Secrets
@@ -126,7 +242,7 @@ Used to create encrypted secrets
kubectl apply -k infra/sealed-secrets
```
Be sure to store the generated sealed secret key in a safa place!
Be sure to store the generated sealed secret key in a safe place!
```shell
kubectl -n kube-system get secrets
@@ -134,10 +250,21 @@ kubectl -n kube-system get secrets
*NB!*: There will be errors if you use my sealed secrets as you (hopefully) don't have the decryption key
# Gateway API
```shell
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/experimental-install.yaml
```
# Cert-manager
```shell
kubectl kustomize --enable-helm infra/cert-manager | kubectl apply -f -
```
# Traefik
Remove the `deployment.dnsConfig` from `infra/traefik/values.yaml` and change the `io.cilium/lb-ipam-ips` annotation to
a valid IP address for your network.
Change the `io.cilium/lb-ipam-ips` annotation in `infra/traefik/values.yaml` to a valid IP address for your network.
Install Traefik
@@ -162,14 +289,14 @@ An unsecured test-application `whoami` should be available at [https://test.${DO
If you configured `apps/test/whoami/traefik-forward-auth` correctly a secured version should be available
at [https://whoami.${DOMAIN}](https://whoami.${DOMAIN}).
# ArgoCD
# Argo CD
[ArgoCD](https://argo-cd.readthedocs.io/en/stable/getting_started/) is used to bootstrap the rest of the cluster.
The cluster uses a combination of Helm and Kustomize to configure infrastructure and applications.
For more details read [this blog post](https://blog.stonegarden.dev/articles/2023/09/argocd-kustomize-with-helm/)
```shell
kubectl apply -k infra/argocd
kubectl kustomize --enable-helm infra/argocd | kubectl apply -f -
```
Get ArgoCD initial secret by running
@@ -210,30 +337,4 @@ kubectl apply -k sets
kubectl drain gauss --delete-emptydir-data --force --ignore-daemonsets
sudo kubeadm reset
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
sudo ipvsadm -C
```
# Troubleshooting
Kubernetes 1.26 requires containerd 1.6.0 or later due to the removal of support for CRI
version `v1alpha2` ([link](https://kubernetes.io/blog/2022/11/18/upcoming-changes-in-kubernetes-1-26/#cri-api-removal)).
Make sure that `runc` is properly configured in containerd.
NB: Make sure the correct `containerd` daemon is running.
(Check the loaded `containerd` service definition as reported by `systemctl status containerd`)
Follow https://github.com/containerd/containerd/blob/main/docs/getting-started.md for further instructions.
```shell
sudo cat /etc/containerd/config.toml
```
```toml
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_path = "/usr/bin/runc"
runtime_type = "io.containerd.runc.v2"
```
## Sealed Secrets
Restart pod after applying master-key.

View File

@@ -0,0 +1,57 @@
#cloud-config
# Cloud-init user-data template, rendered by Terraform's templatefile() with
# ${username}, ${pub_key} and ${hostname}. Prepares a Debian VM as a
# Kubernetes node: sudo user with SSH key, kernel modules + sysctls for
# container networking, and kubeadm/kubelet/kubectl plus containerd.
users:
  - name: ${username}
    groups:
      - sudo
    shell: /bin/bash
    ssh_authorized_keys:
      # Public key injected by Terraform (var.vm_pub-key).
      - ${pub_key}
    # Passwordless sudo so provisioning/automation can run unattended.
    sudo: ALL=(ALL) NOPASSWD:ALL
manage_etc_hosts: true
hostname: ${hostname}
create_hostname_file: true
package_update: true
package_upgrade: true
locale: en_US.UTF-8
timezone: Europe/Oslo
write_files:
  # Kernel modules required for container networking, loaded on every boot.
  - path: /etc/modules-load.d/k8s.conf
    content: |
      overlay
      br_netfilter
  # Sysctls required by kubeadm: let iptables see bridged traffic and
  # enable IPv4 forwarding.
  - path: /etc/sysctl.d/k8s.conf
    content: |
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      net.ipv4.ip_forward = 1
packages:
  - qemu-guest-agent
  - net-tools
  - vim
  - apt-transport-https
  - ca-certificates
  - curl
  - gpg
  - open-iscsi
  - jq
runcmd:
  # qemu-guest-agent lets Proxmox query the VM (IP reporting, clean shutdown).
  - systemctl enable qemu-guest-agent
  - systemctl start qemu-guest-agent
  - localectl set-locale LANG=en_US.UTF-8
  # Add the upstream Kubernetes v1.29 apt repository and its signing key.
  - curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
  - echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list
  - apt update
  - apt install -y kubelet kubeadm kubectl
  # Pin the k8s packages so a routine `apt upgrade` can't skew node versions.
  - apt-mark hold kubelet kubeadm kubectl
  - apt install -y runc containerd
  # Generate the default containerd config, then switch it to the systemd
  # cgroup driver as required by kubelet.
  - containerd config default | tee /etc/containerd/config.toml
  - sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
  - modprobe overlay
  - modprobe br_netfilter
  - sysctl --system
  - systemctl restart containerd

View File

@@ -0,0 +1,2 @@
wget https://github.com/home-assistant/operating-system/releases/download/12.0/haos_ova-12.0.qcow2.xz
xz -d haos_ova-12.0.qcow2.xz

23
machines/euclid/main.tf Normal file
View File

@@ -0,0 +1,23 @@
# Pin the bpg/proxmox provider version so `tofu init` / `terraform init`
# is reproducible across machines.
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.48.2"
    }
  }
}

# Aliased provider for the "euclid" Proxmox node; resources select it with
# `provider = proxmox.euclid`. Connection details and credentials come from
# variables (the auth variable is marked sensitive).
provider "proxmox" {
  alias    = "euclid"
  endpoint = var.euclid.endpoint
  # Set insecure = true to accept a self-signed TLS certificate.
  insecure = var.euclid.insecure

  username  = var.euclid_auth.username
  api_token = var.euclid_auth.api_token

  # SSH access is used by the provider for operations the API does not
  # cover (e.g. uploading snippets/images to the node).
  ssh {
    agent    = var.euclid_auth.agent
    username = var.euclid_auth.username
  }

  # NOTE(review): temp files are placed in /var/tmp instead of the default
  # /tmp — presumably because /tmp on the node is small or restricted; confirm.
  tmp_dir = "/var/tmp"
}

View File

@@ -0,0 +1,28 @@
# Connection details for the "euclid" Proxmox node (non-secret).
variable "euclid" {
  description = "Proxmox server configuration for Euclid machine"
  type = object({
    node_name = string # Proxmox node name as shown in the cluster
    endpoint  = string # API URL, e.g. https://host:8006/
    insecure  = bool   # true to accept a self-signed TLS certificate
  })
}

# API credentials for the euclid node; sensitive so values are redacted
# from plan/apply output.
variable "euclid_auth" {
  description = "Auth for euclid proxmox server"
  type = object({
    agent     = bool   # use the local ssh-agent for SSH operations
    username  = string # Proxmox user, e.g. root@pam
    api_token = string # API token in user@realm!tokenid=uuid form
  })
  sensitive = true
}

variable "vm_user" {
  description = "vm username"
  type        = string
}

variable "vm_pub-key" {
  # Fixed: description was copy-pasted as "vm username".
  description = "vm user ssh public key"
  type        = string
}

View File

@@ -0,0 +1,68 @@
# Home Assistant OS disk image, uploaded to the node's "local" datastore.
# The qcow2 must already exist on disk (see the wget/xz download snippet);
# it is stored under the ISO content type with an .img name so Proxmox
# accepts it as an importable disk image.
resource "proxmox_virtual_environment_file" "haos_generic_image" {
  provider     = proxmox.euclid
  node_name    = var.euclid.node_name
  content_type = "iso"
  datastore_id = "local"

  source_file {
    path      = "images/haos_ova-12.0.qcow2"
    file_name = "haos_ova-12.0.img"
  }
}
# Home Assistant VM booted directly from the uploaded HAOS image.
# UEFI (OVMF) boot is required by HAOS; prevent_destroy guards the
# appliance against accidental `destroy`.
resource "proxmox_virtual_environment_vm" "home_assistant" {
  provider  = proxmox.euclid
  node_name = var.euclid.node_name

  name        = "Home-Assistant"
  description = "Managed by OpenTofu"
  tags        = ["tofu", "home-assistant"]

  on_boot = true
  bios    = "ovmf"
  vm_id   = 1001

  tablet_device = false

  cpu {
    cores = 2
    type  = "host"
  }

  memory {
    dedicated = 4096
  }

  network_device {
    bridge = "vmbr0"
    # Fixed MAC so the router can hand out a stable DHCP lease.
    mac_address = "BC:24:11:50:A6:33"
  }

  agent {
    # HAOS ships the qemu guest agent, so IP reporting works.
    enabled = true
  }

  efi_disk {
    datastore_id = "local-zfs"
    file_format  = "raw" // To support qcow2 format
    type         = "4m"
  }

  disk {
    datastore_id = "local-zfs"
    # Boot disk imported from the uploaded HAOS image.
    file_id   = proxmox_virtual_environment_file.haos_generic_image.id
    interface = "scsi0"
    cache     = "writethrough"
    discard   = "on"
    ssd       = true
    size      = 64
  }

  operating_system {
    type = "l26" # Linux Kernel 2.6 - 5.X.
  }

  lifecycle {
    # Never destroy the running Home Assistant appliance via tofu/terraform.
    prevent_destroy = true
  }
}

214
machines/euclid/vm-k8s.tf Normal file
View File

@@ -0,0 +1,214 @@
# Debian 12 (Bookworm) generic cloud image, downloaded by the Proxmox node
# itself and verified against the published SHA-512 checksum. Stored under
# the ISO content type with an .img name so it can back VM disks below.
resource "proxmox_virtual_environment_download_file" "debian_12_generic_image" {
  provider     = proxmox.euclid
  node_name    = var.euclid.node_name
  content_type = "iso"
  datastore_id = "local"

  # Fixed: local file_name previously said 20240201-1644 while the URL
  # downloads the 20240211-1654 build — the stored artifact was mislabeled.
  file_name          = "debian-12-generic-amd64-20240211-1654.img"
  url                = "https://cloud.debian.org/images/cloud/bookworm/20240211-1654/debian-12-generic-amd64-20240211-1654.qcow2"
  checksum           = "b679398972ba45a60574d9202c4f97ea647dd3577e857407138b73b71a3c3c039804e40aac2f877f3969676b6c8a1ebdb4f2d67a4efa6301c21e349e37d43ef5"
  checksum_algorithm = "sha512"
}
# Make sure the "Snippets" content type is enabled on the target datastore in Proxmox before applying the configuration below.
# https://github.com/bpg/terraform-provider-proxmox/blob/main/docs/guides/cloud-init.md
# Rendered cloud-init user-data for the control-plane node, uploaded as a
# snippet so the VM's initialization block can reference it.
# NB: the "Snippets" content type must be enabled on the datastore first
# (see the comment above this resource in the original file).
resource "proxmox_virtual_environment_file" "cloud-init-ctrl-01" {
  provider     = proxmox.euclid
  node_name    = var.euclid.node_name
  content_type = "snippets"
  datastore_id = "local"

  source_raw {
    # Template fills in the user, SSH key and per-VM hostname.
    data = templatefile("./cloud-init/user.yaml", {
      username = var.vm_user
      pub_key  = var.vm_pub-key
      hostname = "k8s-ctrl-01"
    })
    file_name = "cloud-init-k8s-ctrl-01.yaml"
  }
}
# Rendered cloud-init user-data for the worker node; identical to the
# control-plane snippet except for the hostname.
resource "proxmox_virtual_environment_file" "cloud-init-work-01" {
  provider     = proxmox.euclid
  node_name    = var.euclid.node_name
  content_type = "snippets"
  datastore_id = "local"

  source_raw {
    data = templatefile("./cloud-init/user.yaml", {
      username = var.vm_user
      pub_key  = var.vm_pub-key
      hostname = "k8s-work-01"
    })
    file_name = "cloud-init-k8s-work-01.yaml"
  }
}
# Kubernetes control-plane VM: Debian 12 cloud image + cloud-init snippet,
# static IP, UEFI boot, q35 machine type.
resource "proxmox_virtual_environment_vm" "k8s-ctrl-01" {
  provider  = proxmox.euclid
  node_name = var.euclid.node_name

  name        = "k8s-ctrl-01"
  description = "Kubernetes Control Plane 01"
  tags        = ["k8s", "control-plane"]

  on_boot = true
  bios    = "ovmf"
  vm_id   = 8001

  initialization {
    ip_config {
      ipv4 {
        #address = "dhcp"
        # Static address so the API server endpoint stays stable.
        address = "192.168.1.100/24"
        gateway = "192.168.1.1"
      }
    }

    datastore_id      = "local-zfs"
    user_data_file_id = proxmox_virtual_environment_file.cloud-init-ctrl-01.id
  }

  cpu {
    cores = 4
    type  = "host"
  }

  memory {
    dedicated = 4096
  }

  network_device {
    bridge = "vmbr0"
    # Fixed MAC so DHCP/ARP entries stay predictable on the LAN.
    mac_address = "BC:24:11:2E:C0:01"
  }

  agent {
    # qemu-guest-agent is installed by cloud-init; needed for the
    # ipv4_addresses outputs below to resolve.
    enabled = true
  }

  machine       = "q35"
  scsi_hardware = "virtio-scsi-single"

  efi_disk {
    datastore_id = "local-zfs"
    file_format  = "raw" // To support qcow2 format
    type         = "4m"
  }

  disk {
    datastore_id = "local-zfs"
    # Root disk cloned from the downloaded Debian cloud image.
    file_id   = proxmox_virtual_environment_download_file.debian_12_generic_image.id
    interface = "scsi0"
    cache     = "writethrough"
    discard   = "on"
    ssd       = true
    size      = 32
  }

  boot_order = ["scsi0"]

  operating_system {
    type = "l26" # Linux Kernel 2.6 - 5.X.
  }
}
# Kubernetes worker VM: same template as the control plane but with more
# memory and the host iGPU passed through for workloads that need it.
resource "proxmox_virtual_environment_vm" "k8s-work-01" {
  provider  = proxmox.euclid
  node_name = var.euclid.node_name

  name        = "k8s-work-01"
  description = "Kubernetes Worker 01"
  tags        = ["k8s", "worker"]

  on_boot = true
  bios    = "ovmf"
  vm_id   = 8101

  initialization {
    ip_config {
      ipv4 {
        # Static address for predictable node registration.
        address = "192.168.1.110/24"
        gateway = "192.168.1.1"
      }
    }

    datastore_id      = "local-zfs"
    user_data_file_id = proxmox_virtual_environment_file.cloud-init-work-01.id
  }

  cpu {
    cores = 4
    type  = "host"
  }

  memory {
    dedicated = 8192
  }

  network_device {
    bridge = "vmbr0"
    # Fixed MAC so DHCP/ARP entries stay predictable on the LAN.
    mac_address = "BC:24:11:2E:AE:01"
  }

  agent {
    # qemu-guest-agent is installed by cloud-init; needed for the
    # ipv4_addresses outputs below to resolve.
    enabled = true
  }

  machine       = "q35"
  scsi_hardware = "virtio-scsi-single"

  efi_disk {
    datastore_id = "local-zfs"
    file_format  = "raw" // To support qcow2 format
    type         = "4m"
  }

  disk {
    datastore_id = "local-zfs"
    # Root disk cloned from the downloaded Debian cloud image.
    file_id   = proxmox_virtual_environment_download_file.debian_12_generic_image.id
    interface = "scsi0"
    cache     = "writethrough"
    discard   = "on"
    ssd       = true
    size      = 32
  }

  boot_order = ["scsi0"]

  operating_system {
    type = "l26" # Linux Kernel 2.6 - 5.X.
  }

  hostpci {
    # Passthrough iGPU
    # NOTE(review): 0000:00:02 is the typical Intel iGPU PCI address —
    # verify against `lspci` output on the node.
    device = "hostpci0"
    id     = "0000:00:02"
    pcie   = true
    rombar = true
    xvga   = false
  }
}
# VM IPs as reported by the qemu guest agent. Index [1][0] picks the first
# address of the second interface as exposed by the provider's
# ipv4_addresses attribute.
output "ctrl_01_ipv4_address" {
  depends_on = [proxmox_virtual_environment_vm.k8s-ctrl-01]
  value      = proxmox_virtual_environment_vm.k8s-ctrl-01.ipv4_addresses[1][0]
}

output "work_01_ipv4_address" {
  depends_on = [proxmox_virtual_environment_vm.k8s-work-01]
  value      = proxmox_virtual_environment_vm.k8s-work-01.ipv4_addresses[1][0]
}

# Also persist the IPs to local files so shell scripts / ssh wrappers can
# read them without invoking tofu/terraform.
resource "local_file" "ctrl_01_ip" {
  content         = proxmox_virtual_environment_vm.k8s-ctrl-01.ipv4_addresses[1][0]
  filename        = "output/ctrl-01-ip.txt"
  file_permission = "0644"
}

resource "local_file" "work_01_ip" {
  content         = proxmox_virtual_environment_vm.k8s-work-01.ipv4_addresses[1][0]
  filename        = "output/work-01-ip.txt"
  file_permission = "0644"
}
}