feat(k8s): multi-node Talos cluster

Vegard Hagen
2024-05-23 18:21:38 +02:00
parent 854b6ce6a2
commit fbdccd7731
35 changed files with 1018 additions and 16 deletions

1
.gitignore vendored

@@ -20,3 +20,4 @@ override.tf.json
*_override.tf.json
*.qcow2
+*.raw

22
machines/cantor/main.tf Normal file

@@ -0,0 +1,22 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.50.0"
}
}
}
provider "proxmox" {
alias = "cantor"
endpoint = var.cantor.endpoint
insecure = var.cantor.insecure
api_token = var.cantor_auth.api_token
ssh {
agent = true
username = var.cantor_auth.username
}
tmp_dir = "/var/tmp"
}


@@ -0,0 +1,141 @@
resource "proxmox_virtual_environment_download_file" "truenas-scale-23" {
provider = proxmox.cantor
node_name = var.cantor.node_name
content_type = "iso"
datastore_id = "local"
file_name = "TrueNAS-SCALE-23.10.2.iso"
url = "https://download.sys.truenas.net/TrueNAS-SCALE-Cobia/23.10.2/TrueNAS-SCALE-23.10.2.iso"
checksum = "c2b0d6ef6ca6a9bf53a0ee9c50f8d0461fd5f12b962a8800e95d0bc3ef629edb"
checksum_algorithm = "sha256"
}
resource "proxmox_virtual_environment_vm" "truenas-scale" {
provider = proxmox.cantor
node_name = var.cantor.node_name
name = "truenas-scale"
description = "TrueNAS SCALE"
tags = ["nas"]
on_boot = true
vm_id = 1000
machine = "q35"
scsi_hardware = "virtio-scsi-single"
bios = "ovmf"
cpu {
cores = 4
type = "host"
}
memory {
dedicated = 24576
}
network_device {
bridge = "vmbr0"
}
efi_disk {
datastore_id = "local-zfs"
file_format = "raw" // To support qcow2 format
type = "4m"
}
disk {
datastore_id = "local-zfs"
file_id = proxmox_virtual_environment_download_file.truenas-scale-23.id
iothread = true
interface = "scsi0"
cache = "writethrough"
discard = "on"
ssd = true
size = 16
}
disk {
datastore_id = "local-zfs"
iothread = true
file_format = "raw"
interface = "scsi1"
cache = "writethrough"
discard = "on"
ssd = true
size = 128
}
boot_order = ["scsi1", "scsi0"]
agent {
enabled = true
}
operating_system {
type = "l26" # Linux Kernel 2.6 - 6.X.
}
initialization {
dns {
domain = var.vm_dns.domain
servers = var.vm_dns.servers
}
ip_config {
ipv4 {
address = "192.168.1.55/24"
gateway = "192.168.1.1"
}
}
datastore_id = "local-zfs"
# user_data_file_id = proxmox_virtual_environment_file.cloud-init-work-01.id
}
hostpci {
device = "hostpci0"
mapping = "ASM1166-0"
pcie = true
rombar = true
xvga = false
}
// hostpci {
// device = "hostpci1"
// mapping = "ASM1182e-0"
// pcie = true
// rombar = true
// xvga = false
// }
//
// hostpci {
// device = "hostpci2"
// mapping = "ASM1182e-1"
// pcie = true
// rombar = true
// xvga = false
// }
//
// hostpci {
// device = "hostpci3"
// mapping = "ASM1182e-2"
// pcie = true
// rombar = true
// xvga = false
// }
//
// hostpci {
// device = "hostpci4"
// mapping = "I226-V-0"
// pcie = true
// rombar = true
// xvga = false
// }
//
// hostpci {
// device = "hostpci5"
// mapping = "I226-V-1"
// pcie = true
// rombar = true
// xvga = false
// }
}


@@ -0,0 +1,51 @@
variable "cantor" {
description = "Proxmox server configuration for Cantor"
type = object({
node_name = string
endpoint = string
insecure = bool
})
}
variable "cantor_auth" {
description = "Cantor Proxmox server auth"
type = object({
username = string
api_token = string
})
sensitive = true
}
variable "vm_dns" {
description = "DNS config for VMs"
type = object({
domain = string
servers = list(string)
})
}
variable "vm_user" {
description = "VM username"
type = string
}
variable "vm_password" {
description = "VM password"
type = string
sensitive = true
}
variable "host_pub-key" {
description = "Host public key"
type = string
}
variable "k8s-version" {
description = "Kubernetes version"
type = string
}
variable "cilium-cli-version" {
description = "Cilium CLI version"
type = string
}
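
For reference, a minimal terraform.tfvars satisfying these variables might look like the sketch below; every value is a placeholder assumption, not taken from this commit.

# Hypothetical terraform.tfvars (placeholder values only)
cantor = {
  node_name = "cantor"
  endpoint  = "https://192.168.1.52:8006"
  insecure  = true
}
cantor_auth = {
  username  = "root"
  api_token = "root@pam!tofu=00000000-0000-0000-0000-000000000000"
}
vm_dns = {
  domain  = "example.com"
  servers = ["1.1.1.1", "8.8.8.8"]
}
vm_user            = "vegard"
vm_password        = "$6$placeholder-password-hash"
host_pub-key       = "ssh-ed25519 AAAA... user@host"
k8s-version        = "1.30"
cilium-cli-version = "0.16.9"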


@@ -1,2 +1,4 @@
wget https://github.com/home-assistant/operating-system/releases/download/12.1/haos_ova-12.1.qcow2.xz
xz -d haos_ova-12.1.qcow2.xz
+wget https://cloud.debian.org/images/cloud/bookworm-backports/20240429-1732/debian-12-backports-generic-amd64-20240429-1732.qcow2


@@ -1,4 +1,4 @@
resource "proxmox_virtual_environment_download_file" "debian_12_generic_image" {
resource "proxmox_virtual_environment_download_file" "debian_12_bookworm" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
@@ -10,6 +10,66 @@ resource "proxmox_virtual_environment_download_file" "debian_12_generic_image" {
checksum_algorithm = "sha512"
}
resource "proxmox_virtual_environment_download_file" "debian_12_bpo" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
datastore_id = "local"
file_name = "debian-12-backports-generic-amd64-20240429-1732.img"
url = "https://cloud.debian.org/images/cloud/bookworm-backports/20240429-1732/debian-12-backports-generic-amd64-20240429-1732.qcow2"
# checksum = "b679398972ba45a60574d9202c4f97ea647dd3577e857407138b73b71a3c3c039804e40aac2f877f3969676b6c8a1ebdb4f2d67a4efa6301c21e349e37d43ef5"
# checksum_algorithm = "sha512"
}
resource "proxmox_virtual_environment_download_file" "ubuntu_jammy_cloud_amd64" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
datastore_id = "local"
file_name = "jammy-server-cloudimg-amd64.img"
url = "https://cloud-images.ubuntu.com/jammy/20240514/jammy-server-cloudimg-amd64.img"
checksum = "1718f177dde4c461148ab7dcbdcf2f410c1f5daa694567f6a8bbb239d864b525"
checksum_algorithm = "sha256"
}
resource "proxmox_virtual_environment_download_file" "ubuntu_mantic-cloud-amd64" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
datastore_id = "local"
file_name = "mantic-server-cloudimg-amd64.img"
url = "https://cloud-images.ubuntu.com/mantic/20240514/mantic-server-cloudimg-amd64.img"
# checksum = "1718f177dde4c461148ab7dcbdcf2f410c1f5daa694567f6a8bbb239d864b525"
# checksum_algorithm = "sha256"
}
resource "proxmox_virtual_environment_download_file" "ubuntu_noble-cloud-amd64" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
datastore_id = "local"
file_name = "noble-server-cloudimg-amd64.img"
url = "https://cloud-images.ubuntu.com/noble/20240505/noble-server-cloudimg-amd64.img"
# checksum = "1718f177dde4c461148ab7dcbdcf2f410c1f5daa694567f6a8bbb239d864b525"
# checksum_algorithm = "sha256"
}
#resource "proxmox_virtual_environment_file" "debian_12_backports_image" {
# provider = proxmox.euclid
# node_name = var.euclid.node_name
# content_type = "iso"
# datastore_id = "local"
#
# source_file {
# path = "images/debian-12-backports-generic-amd64-20240429-1732.qcow2"
# file_name = "debian-12-backports-generic-amd64-20240429-1732.img"
# }
#}
# Make sure the "Snippets" content type is enabled on the target datastore in Proxmox before applying the configuration below.
# https://github.com/bpg/terraform-provider-proxmox/blob/main/docs/guides/cloud-init.md
resource "proxmox_virtual_environment_file" "cloud-init-ctrl-01" {


@@ -34,7 +34,7 @@ resource "proxmox_virtual_environment_vm" "k8s-ctrl-01" {
disk {
datastore_id = "local-zfs"
-file_id = proxmox_virtual_environment_download_file.debian_12_generic_image.id
+file_id = proxmox_virtual_environment_download_file.debian_12_bpo.id
interface = "scsi0"
cache = "writethrough"
discard = "on"


@@ -34,7 +34,7 @@ resource "proxmox_virtual_environment_vm" "k8s-work-01" {
disk {
datastore_id = "local-zfs"
-file_id = proxmox_virtual_environment_download_file.debian_12_generic_image.id
+file_id = proxmox_virtual_environment_download_file.debian_12_bpo.id
iothread = true
interface = "scsi0"
cache = "writethrough"
@@ -54,16 +54,16 @@ resource "proxmox_virtual_environment_vm" "k8s-work-01" {
size = 64
}
-disk {
-datastore_id = "local-zfs"
-iothread = true
-file_format = "raw"
-interface = "scsi2"
-cache = "writethrough"
-discard = "on"
-ssd = true
-size = 512
-}
+# disk {
+# datastore_id = "local-zfs"
+# iothread = true
+# file_format = "raw"
+# interface = "scsi2"
+# cache = "writethrough"
+# discard = "on"
+# ssd = true
+# size = 512
+# }
boot_order = ["scsi0"]


@@ -2,7 +2,7 @@ terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
-version = "0.50.0"
+version = "0.57.0"
}
}
}


@@ -0,0 +1,11 @@
resource "proxmox_virtual_environment_file" "haos_generic_image" {
provider = proxmox.euclid
node_name = var.euclid.node_name
content_type = "iso"
datastore_id = "local"
source_file {
path = "images/haos_ova-12.3.qcow2"
file_name = "haos_ova-12.3.img"
}
}


@@ -0,0 +1,2 @@
wget https://github.com/home-assistant/operating-system/releases/download/12.3/haos_ova-12.3.qcow2.xz
xz -d haos_ova-12.3.qcow2.xz


@@ -0,0 +1,9 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = ">= 0.57.0"
}
}
}


@@ -0,0 +1 @@
../machines/euclid.tf


@@ -0,0 +1,57 @@
resource "proxmox_virtual_environment_vm" "home_assistant" {
provider = proxmox.euclid
node_name = var.euclid.node_name
name = "Home-Assistant"
description = "Managed by OpenTofu"
tags = ["home-assistant"]
on_boot = true
bios = "ovmf"
scsi_hardware = "virtio-scsi-single"
vm_id = 1000
tablet_device = false
cpu {
cores = 2
type = "host"
}
memory {
dedicated = 4096
}
network_device {
bridge = "vmbr0"
mac_address = "BC:24:11:50:A6:33"
}
agent {
enabled = true
}
efi_disk {
datastore_id = "local-zfs"
file_format = "raw" // To support qcow2 format
type = "4m"
}
disk {
datastore_id = "local-zfs"
file_id = proxmox_virtual_environment_file.haos_generic_image.id
interface = "scsi0"
cache = "writethrough"
discard = "on"
ssd = true
size = 64
}
operating_system {
type = "l26" # Linux Kernel 2.6 - 6.X.
}
lifecycle {
prevent_destroy = true
}
}


@@ -0,0 +1,83 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-install
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: cilium-install
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-install
namespace: kube-system
---
apiVersion: batch/v1
kind: Job
metadata:
name: cilium-install
namespace: kube-system
spec:
backoffLimit: 10
template:
metadata:
labels:
app: cilium-install
spec:
restartPolicy: OnFailure
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- effect: PreferNoSchedule
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoExecute
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: PreferNoSchedule
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
serviceAccountName: cilium-install
hostNetwork: true
containers:
- name: cilium-install
image: quay.io/cilium/cilium-cli-ci:latest
env:
- name: KUBERNETES_SERVICE_HOST
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: KUBERNETES_SERVICE_PORT
value: "6443"
command:
- cilium
- install
- --version=v1.15.5 # renovate: github-releases=cilium/cilium
- --helm-set=ipam.mode=kubernetes
- --set
- kubeProxyReplacement=true
- --helm-set=securityContext.capabilities.ciliumAgent={CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}
- --helm-set=securityContext.capabilities.cleanCiliumState={NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}
- --helm-set=cgroup.autoMount.enabled=false
- --helm-set=cgroup.hostRoot=/sys/fs/cgroup
- --helm-set=k8sServiceHost=localhost
- --helm-set=k8sServicePort=7445 # Talos KubePrism endpoint on localhost:7445
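
Note that this Job is never applied by hand: it is inlined into the Talos control-plane machine config via the inlineManifests patch in control-plane.yaml.tftpl further down, so Talos creates it during bootstrap and Cilium installs itself on the otherwise CNI-less cluster.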


@@ -0,0 +1,70 @@
users:
- name: ${username}
passwd: ${password}
lock_passwd: false
groups: [ adm, cdrom, dip, plugdev, lxd, sudo ]
shell: /bin/bash
ssh_authorized_keys:
- ${pub-key}
#sudo: ALL=(ALL) NOPASSWD:ALL
hostname: ${hostname}
package_update: true
package_upgrade: true
timezone: Europe/Oslo
write_files:
- path: /etc/ssh/sshd_config.d/01-harden-ssh.conf
content: |
PermitRootLogin no
PasswordAuthentication no
ChallengeResponseAuthentication no
UsePAM no
- path: /etc/modules-load.d/k8s.conf
content: |
overlay
br_netfilter
- path: /etc/sysctl.d/k8s.conf
content: |
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
packages:
- qemu-guest-agent
- net-tools
- vim
- apt-transport-https
- ca-certificates
- curl
- gpg
- open-iscsi
- jq
power_state:
delay: now
mode: reboot
message: Rebooting after cloud-init completion
condition: true
runcmd:
- systemctl enable qemu-guest-agent
- localectl set-locale LANG=en_US.UTF-8
- sed -i '/Components/s/$/ non-free non-free-firmware/' /etc/apt/sources.list.d/debian.sources
- curl -fsSL https://pkgs.k8s.io/core:/stable:/v${k8s-version}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
- echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${k8s-version}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list
- apt update
- apt install -y nfs-common
- apt install -y firmware-misc-nonfree intel-media-va-driver-non-free intel-gpu-tools
- apt install -y kubelet kubeadm kubectl
- apt-mark hold kubelet kubeadm kubectl
- apt install -y runc containerd
- containerd config default | tee /etc/containerd/config.toml
- sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
- modprobe overlay
- modprobe br_netfilter
- sysctl --system
- systemctl restart containerd
- ${kubeadm-cmd}


@@ -0,0 +1,9 @@
#cloud-config
${common-config}
- mkdir -p /home/${username}/.kube
- cp /etc/kubernetes/admin.conf /home/${username}/.kube/config
- chown -R ${username}:${username} /home/${username}/.kube
- curl -sfLO https://github.com/cilium/cilium-cli/releases/download/v${cilium-cli-version}/cilium-linux-amd64.tar.gz
- tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
- rm cilium-linux-amd64.tar.gz
- ${cilium-cli-cmd}


@@ -0,0 +1,2 @@
#cloud-config
${common-config}
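
The resources that render these templates are truncated above (only the first line of cloud-init-ctrl-01 is visible), but the wiring is presumably along the lines of the sketch below. Treat it as a hedged sketch: the file names, hostname, and the kubeadm/cilium commands are assumptions, not the repo's exact code.

# A sketch of how the cloud-init templates could be composed (assumed wiring)
resource "proxmox_virtual_environment_file" "cloud-init-ctrl-01" {
  provider     = proxmox.euclid
  node_name    = var.euclid.node_name
  content_type = "snippets"
  datastore_id = "local"

  source_raw {
    file_name = "cloud-init-ctrl-01.yaml"
    data = templatefile("cloud-init/k8s-ctrl.yaml.tftpl", {
      username           = var.vm_user
      cilium-cli-version = var.cilium-cli-version
      cilium-cli-cmd     = "cilium install --set kubeProxyReplacement=true" # assumed command
      common-config = templatefile("cloud-init/k8s-common.yaml.tftpl", {
        username    = var.vm_user
        password    = var.vm_password
        pub-key     = var.host_pub-key
        hostname    = "k8s-ctrl-01" # assumed hostname
        k8s-version = var.k8s-version
        kubeadm-cmd = "kubeadm init --skip-phases=addon/kube-proxy" # assumed command
      })
    })
  }
}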

132
tofu/k8s/config.tf Normal file

@@ -0,0 +1,132 @@
resource "talos_machine_secrets" "machine_secrets" {
talos_version = "v1.7"
}
data "talos_client_configuration" "talosconfig" {
cluster_name = var.cluster.name
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
endpoints = [for k, v in var.node_data.controlplanes : v.ip]
}
data "talos_machine_configuration" "control-plane" {
for_each = var.node_data.controlplanes
cluster_name = var.cluster.name
cluster_endpoint = var.cluster.endpoint
machine_type = "controlplane"
machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets
talos_version = "v1.7"
config_patches = [
templatefile("talos/control-plane.yaml.tftpl", {
hostname = each.key
})
]
}
resource "proxmox_virtual_environment_file" "controlplane-config" {
provider = proxmox.abel
for_each = var.node_data.controlplanes
node_name = each.value.host_node
content_type = "snippets"
datastore_id = "local"
source_raw {
data = data.talos_machine_configuration.control-plane[each.key].machine_configuration
file_name = "talos-${each.key}.cloud-config.yaml"
}
}
resource "talos_machine_configuration_apply" "ctrl_config_apply" {
depends_on = [proxmox_virtual_environment_vm.controlplane]
for_each = var.node_data.controlplanes
node = each.value.ip
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
machine_configuration_input = data.talos_machine_configuration.control-plane[each.key].machine_configuration
}
data "talos_machine_configuration" "worker" {
for_each = var.node_data.workers
cluster_name = var.cluster.name
cluster_endpoint = var.cluster.endpoint
machine_type = "worker"
machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets
talos_version = "v1.7"
config_patches = [
templatefile("talos/worker.yaml.tftpl", {
hostname = each.key
})
]
}
resource "proxmox_virtual_environment_file" "worker-config" {
provider = proxmox.abel
for_each = var.node_data.workers
node_name = each.value.host_node
content_type = "snippets"
datastore_id = "local"
source_raw {
data = data.talos_machine_configuration.worker[each.key].machine_configuration
file_name = "talos-${each.key}.cloud-config.yaml"
}
}
resource "talos_machine_configuration_apply" "worker_config_apply" {
depends_on = [proxmox_virtual_environment_vm.workers]
for_each = var.node_data.workers
node = each.value.ip
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
machine_configuration_input = data.talos_machine_configuration.worker[each.key].machine_configuration
config_patches = [
templatefile("talos/worker.yaml.tftpl", {
hostname = each.key
})
]
}
resource "talos_machine_bootstrap" "bootstrap" {
depends_on = [talos_machine_configuration_apply.ctrl_config_apply]
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
node = [for k, v in var.node_data.controlplanes : v.ip][0]
}
data "talos_cluster_health" "health" {
depends_on = [talos_machine_configuration_apply.ctrl_config_apply]
client_configuration = data.talos_client_configuration.talosconfig.client_configuration
control_plane_nodes = [for k, v in var.node_data.controlplanes : v.ip]
worker_nodes = [for k, v in var.node_data.workers : v.ip]
endpoints = data.talos_client_configuration.talosconfig.endpoints
timeouts = {
read = "10m"
}
}
data "talos_cluster_kubeconfig" "kubeconfig" {
depends_on = [talos_machine_bootstrap.bootstrap, data.talos_cluster_health.health]
client_configuration = talos_machine_secrets.machine_secrets.client_configuration
node = [for k, v in var.node_data.controlplanes : v.ip][0]
}
output "talosconfig" {
value = data.talos_client_configuration.talosconfig.talos_config
sensitive = true
}
output "kubeconfig" {
value = data.talos_cluster_kubeconfig.kubeconfig.kubeconfig_raw
sensitive = true
}
resource "local_file" "taloc-client-config" {
content = data.talos_client_configuration.talosconfig.talos_config
filename = "output/talos-config.yaml"
file_permission = "0600"
}
resource "local_file" "kube-config" {
content = data.talos_cluster_kubeconfig.kubeconfig.kubeconfig_raw
filename = "output/kube-config.yaml"
file_permission = "0600"
}
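
A hedged usage note: once tofu apply has written these files, the cluster can be reached directly with e.g. talosctl --talosconfig output/talos-config.yaml health or export KUBECONFIG=output/kube-config.yaml; both outputs are also marked sensitive, so they are redacted in plan and apply output.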

20
tofu/k8s/images.tf Normal file

@@ -0,0 +1,20 @@
locals {
talos = {
version = "v1.7.4" # renovate: github-releases=siderolabs/talos
checksum = "26e23f1bf44eecb0232d0aa221223b44f4e40806b7d12cf1a72626927da9a8a4"
}
}
resource "proxmox_virtual_environment_file" "talos_nocloud_image" {
provider = proxmox.abel
for_each = toset(var.host_machines)
node_name = each.key
content_type = "iso"
datastore_id = "local"
source_file {
path = "images/talos-${local.talos.version}-nocloud-amd64.raw"
file_name = "talos-${local.talos.version}-nocloud-amd64.img"
}
}

4
tofu/k8s/images/download.sh Executable file

@@ -0,0 +1,4 @@
#wget https://github.com/siderolabs/talos/releases/download/v1.7.4/nocloud-amd64.raw.xz
wget https://factory.talos.dev/image/dcac6b92c17d1d8947a0cee5e0e6b6904089aa878c70d66196bb1138dbd05d1a/v1.7.4/nocloud-amd64.raw.xz
xz -d nocloud-amd64.raw.xz
mv nocloud-amd64.raw talos-v1.7.4-nocloud-amd64.raw

13
tofu/k8s/main.tf Normal file

@@ -0,0 +1,13 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = ">= 0.57.0"
}
talos = {
source = "siderolabs/talos"
version = "0.5.0"
}
}
}

1
tofu/k8s/pve_abel.tf Symbolic link

@@ -0,0 +1 @@
../machines/abel.tf

1
tofu/k8s/pve_cantor.tf Symbolic link

@@ -0,0 +1 @@
../machines/cantor.tf

1
tofu/k8s/pve_euclid.tf Symbolic link

@@ -0,0 +1 @@
../machines/euclid.tf

21
tofu/k8s/rbac.tf Normal file

@@ -0,0 +1,21 @@
resource "proxmox_virtual_environment_role" "csi" {
provider = proxmox.abel
role_id = "csi"
privileges = [
"VM.Audit",
"VM.Config.Disk",
"Datastore.Allocate",
"Datastore.AllocateSpace",
"Datastore.Audit"
]
}
resource "proxmox_virtual_environment_user" "kubernetes-csi" {
provider = proxmox.abel
user_id = "kubernetes-csi@pve"
acl {
path = "/"
propagate = true
role_id = proxmox_virtual_environment_role.csi.role_id
}
}
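
The privilege set here lines up with what the Proxmox CSI plugin documents for its storage user. Note that this only creates the role, user and ACL; the API token itself presumably still has to be issued out of band, e.g. pveum user token add kubernetes-csi@pve csi --privsep 0 (a hedged example command, not part of this commit).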


@@ -0,0 +1,14 @@
machine:
network:
hostname: ${hostname}
cluster:
allowSchedulingOnControlPlanes: true
network:
cni:
name: none
proxy:
disabled: true
inlineManifests:
- name: cilium-install
contents: |
${indent(8, file("bootstrap/cilium-install.yaml"))}


@@ -0,0 +1,7 @@
machine:
customization:
systemExtensions:
officialExtensions:
- siderolabs/i915-ucode
- siderolabs/intel-ucode
- siderolabs/qemu-guest-agent


@@ -0,0 +1,3 @@
machine:
network:
hostname: ${hostname}

34
tofu/k8s/variables.tf Normal file

@@ -0,0 +1,34 @@
variable "cluster" {
type = object({
name = string
endpoint = string
talos_version = string
})
}
variable "host_machines" {
type = list(string)
}
variable "node_data" {
description = "Talos node definitions (control planes and workers), keyed by hostname"
type = object({
controlplanes = map(object({
ip = string
mac_address = string
host_node = string
vm_id = number
cpu = number
ram_dedicated = number
igpu = optional(bool, false)
}))
workers = map(object({
ip = string
mac_address = string
host_node = string
vm_id = number
cpu = number
ram_dedicated = number
}))
})
}
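
For reference, a terraform.tfvars for this module might look like the sketch below; the names, addresses, MACs, IDs and sizes are all placeholder assumptions.

# Hypothetical terraform.tfvars values (placeholders only)
cluster = {
  name          = "talos"
  endpoint      = "https://192.168.1.100:6443"
  talos_version = "v1.7"
}
host_machines = ["abel"]
node_data = {
  controlplanes = {
    ctrl-00 = {
      ip            = "192.168.1.100"
      mac_address   = "BC:24:11:00:00:01"
      host_node     = "abel"
      vm_id         = 800
      cpu           = 4
      ram_dedicated = 4096
      igpu          = true
    }
  }
  workers = {
    work-00 = {
      ip            = "192.168.1.110"
      mac_address   = "BC:24:11:00:00:02"
      host_node     = "abel"
      vm_id         = 810
      cpu           = 2
      ram_dedicated = 8192
    }
  }
}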


@@ -0,0 +1,80 @@
resource "proxmox_virtual_environment_vm" "controlplane" {
provider = proxmox.abel
for_each = var.node_data.controlplanes
node_name = each.value.host_node
name = each.key
description = "Talos Kubernetes Control Plane"
tags = ["k8s", "control-plane"]
on_boot = true
vm_id = each.value.vm_id
machine = "q35"
scsi_hardware = "virtio-scsi-single"
bios = "seabios"
agent {
enabled = true
}
cpu {
cores = each.value.cpu
type = "host"
}
memory {
dedicated = each.value.ram_dedicated
}
network_device {
bridge = "vmbr0"
mac_address = each.value.mac_address
}
disk {
datastore_id = "local-zfs"
interface = "scsi0"
iothread = true
cache = "writethrough"
discard = "on"
ssd = true
# file_id = proxmox_virtual_environment_download_file.talos_nocloud_image.id
file_id = proxmox_virtual_environment_file.talos_nocloud_image[each.value.host_node].id
file_format = "raw"
size = 20
}
boot_order = ["scsi0"]
operating_system {
type = "l26" # Linux Kernel 2.6 - 6.X.
}
initialization {
datastore_id = "local-zfs"
# meta_data_file_id = proxmox_virtual_environment_file.controlplane-config[each.key].id
ip_config {
ipv4 {
address = "${each.value.ip}/24"
gateway = "192.168.1.1"
}
ipv6 {
address = "dhcp"
}
}
}
dynamic "hostpci" {
for_each = each.value.igpu ? [1] : []
content {
# Passthrough iGPU
device = "hostpci0"
mapping = "iGPU"
pcie = true
rombar = true
xvga = false
}
}
}

78
tofu/k8s/vm-workers.tf Normal file

@@ -0,0 +1,78 @@
resource "proxmox_virtual_environment_vm" "workers" {
provider = proxmox.abel
for_each = var.node_data.workers
node_name = each.value.host_node
name = each.key
description = "Talos Kubernetes Worker"
tags = ["k8s", "worker"]
on_boot = true
vm_id = each.value.vm_id
machine = "q35"
scsi_hardware = "virtio-scsi-single"
bios = "seabios"
agent {
enabled = true
}
cpu {
cores = each.value.cpu
type = "host"
}
memory {
dedicated = each.value.ram_dedicated
}
network_device {
bridge = "vmbr0"
mac_address = each.value.mac_address
}
disk {
datastore_id = "local-zfs"
interface = "scsi0"
iothread = true
cache = "writethrough"
discard = "on"
ssd = true
# file_id = proxmox_virtual_environment_download_file.talos_nocloud_image.id
file_id = proxmox_virtual_environment_file.talos_nocloud_image[each.value.host_node].id
file_format = "raw"
size = 20
}
boot_order = ["scsi0"]
operating_system {
type = "l26" # Linux Kernel 2.6 - 6.X.
}
initialization {
datastore_id = "local-zfs"
# meta_data_file_id = proxmox_virtual_environment_file.worker-config[each.key].id
ip_config {
ipv4 {
address = "${each.value.ip}/24"
gateway = "192.168.1.1"
}
ipv6 {
address = "dhcp"
}
}
}
# hostpci {
# # Passthrough iGPU
# device = "hostpci0"
# #id = "0000:00:02"
# mapping = "iGPU"
# pcie = true
# rombar = true
# xvga = false
# }
}

24
tofu/machines/abel.tf Normal file

@@ -0,0 +1,24 @@
variable "abel" {
description = "Abel Proxmox server auth"
type = object({
node_name = string
username = string
api_token = string
})
sensitive = true
}
provider "proxmox" {
alias = "abel"
#endpoint = "https://proxmox.abel.stonegarden.dev"
endpoint = "https://192.168.1.62:8006"
insecure = true
api_token = var.abel.api_token
ssh {
agent = true
username = var.abel.username
}
tmp_dir = "/var/tmp"
}

24
tofu/machines/cantor.tf Normal file

@@ -0,0 +1,24 @@
variable "cantor" {
description = "Cantor Proxmox server auth"
type = object({
node_name = string
username = string
api_token = string
})
sensitive = true
}
provider "proxmox" {
alias = "cantor"
#endpoint = "https://proxmox.cantor.stonegarden.dev"
endpoint = "https://192.168.1.52:8006"
insecure = true
api_token = var.cantor.api_token
ssh {
agent = true
username = var.cantor.username
}
tmp_dir = "/var/tmp"
}

24
tofu/machines/euclid.tf Normal file

@@ -0,0 +1,24 @@
variable "euclid" {
description = "Euclid Proxmox server auth"
type = object({
node_name = string
username = string
api_token = string
})
sensitive = true
}
provider "proxmox" {
alias = "euclid"
#endpoint = "https://proxmox.euclid.stonegarden.dev"
endpoint = "https://192.168.1.42:8006"
insecure = true
api_token = var.euclid.api_token
ssh {
agent = true
username = var.euclid.username
}
tmp_dir = "/var/tmp"
}