Mirror of https://github.com/optim-enterprises-bv/homelab.git (synced 2025-10-30 01:22:31 +00:00)
feat(tofu): split into multiple configurations
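For orientation, this is the layout the split introduces, as implied by the files in this diff (each directory is a self-contained OpenTofu configuration with its own providers and state):

remodel/tofu/
├── home-assistant/
│   ├── image.tf
│   ├── main.tf
│   ├── variables.tf
│   └── vm.tf
└── talos-k8s/
    ├── bootstrap/
    │   └── cilium-install.yaml
    ├── machine-config/
    │   └── worker.yaml.tftpl
    ├── image.tf
    ├── main.tf
    ├── proxmox-rbac.tf
    ├── talos-config.tf
    ├── variables.tf
    ├── vm-controlplane.tf
    └── vm-workers.tf

Two further files appear in the diff below without captured paths: the control-plane machine-config template and a small customization file listing Talos system extensions.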
.gitignore (vendored, 4 changed lines)
@@ -3,8 +3,8 @@ charts/example
 **/infra/*/charts
 *secret*.yaml

-**/.terraform/*
-**/output
+.terraform
+output

 *.tfstate
 *.tfstate.*
remodel/tofu/home-assistant/image.tf (new file, 27 lines)
@@ -0,0 +1,27 @@
resource "null_resource" "haos_image" {
  triggers = {
    on_version_change = var.haos_version
  }

  provisioner "local-exec" {
    command = "curl -s -L ${var.haos_download_url}/${var.haos_version}/haos_ova-${var.haos_version}.qcow2.xz | xz -d > ${var.local_file}"
  }

  # provisioner "local-exec" {
  #   when    = destroy
  #   command = "rm ${local.haos.local_file}"
  # }
}

resource "proxmox_virtual_environment_file" "haos_generic_image" {
  depends_on   = [null_resource.haos_image]
  node_name    = var.proxmox_node.name
  datastore_id = var.proxmox_node.image_datastore

  content_type = "iso"

  source_file {
    path      = var.local_file
    file_name = "haos_ova-${var.haos_version}.img"
  }
}
remodel/tofu/home-assistant/main.tf (new file, 19 lines)
@@ -0,0 +1,19 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.60.0"
    }
  }
}

provider "proxmox" {
  endpoint = var.proxmox_node.endpoint
  insecure = var.proxmox_node.insecure

  api_token = var.proxmox_node.api_token
  ssh {
    agent    = true
    username = var.proxmox_node.username
  }
}
remodel/tofu/home-assistant/variables.tf (new file, 25 lines)
@@ -0,0 +1,25 @@
variable "proxmox_node" {
  type = object({
    name            = string
    endpoint        = string
    insecure        = bool
    username        = string
    api_token       = string
    image_datastore = string
  })
  sensitive = true
}

variable "haos_version" {
  type = string
}

variable "haos_download_url" {
  type    = string
  default = "https://github.com/home-assistant/operating-system/releases/download"
}

variable "local_file" {
  type    = string
  default = "home-assistant/haos_ova.qcow2"
}
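Neither the Proxmox connection nor the HAOS version has a default, so this configuration is driven by a tfvars file. A minimal sketch with purely illustrative values (the node name, endpoint, token, and version below are placeholders, not values from the repository):

# terraform.tfvars — illustrative values only
proxmox_node = {
  name            = "pve1"                      # hypothetical node name
  endpoint        = "https://192.168.1.10:8006" # hypothetical API endpoint
  insecure        = true
  username        = "root"
  api_token       = "root@pam!tofu=00000000-0000-0000-0000-000000000000" # placeholder token
  image_datastore = "local"
}

haos_version = "12.3" # example release; use whichever HAOS version you actually want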
remodel/tofu/home-assistant/vm.tf (new file, 57 lines)
@@ -0,0 +1,57 @@
resource "proxmox_virtual_environment_vm" "home_assistant" {
  node_name = var.proxmox_node.name

  name          = "Home-Assistant"
  description   = "Managed by OpenTofu"
  tags          = ["home-assistant"]
  on_boot       = true
  bios          = "ovmf"
  scsi_hardware = "virtio-scsi-single"

  vm_id = 1100

  tablet_device = false

  cpu {
    cores = 2
    type  = "host"
  }

  memory {
    dedicated = 4096
  }

  network_device {
    bridge      = "vmbr0"
    mac_address = "BC:24:11:50:A6:33"
  }

  agent {
    enabled = true
  }

  efi_disk {
    datastore_id = "local-zfs"
    file_format  = "raw"
    type         = "4m"
  }

  disk {
    datastore_id = "local-zfs"
    file_id      = proxmox_virtual_environment_file.haos_generic_image.id
    interface    = "scsi0"
    cache        = "writethrough"
    discard      = "on"
    iothread     = true
    ssd          = true
    size         = 64
  }

  operating_system {
    type = "l26"
  }

  lifecycle {
    prevent_destroy = true
  }
}
remodel/tofu/talos-k8s/bootstrap/cilium-install.yaml (new file, 83 lines)
@@ -0,0 +1,83 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-install
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: cilium-install
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium-install
  namespace: kube-system
---
apiVersion: batch/v1
kind: Job
metadata:
  name: cilium-install
  namespace: kube-system
spec:
  backoffLimit: 10
  template:
    metadata:
      labels:
        app: cilium-install
    spec:
      restartPolicy: OnFailure
      tolerations:
        - operator: Exists
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
        - effect: PreferNoSchedule
          operator: Exists
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoExecute
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: PreferNoSchedule
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
      serviceAccountName: cilium-install
      hostNetwork: true
      containers:
        - name: cilium-install
          image: quay.io/cilium/cilium-cli-ci:latest
          env:
            - name: KUBERNETES_SERVICE_HOST
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: KUBERNETES_SERVICE_PORT
              value: "6443"
          command:
            - cilium
            - install
            - --version=v1.15.5 # renovate: github-releases=cilium/cilium
            - --helm-set=ipam.mode=kubernetes
            - --set
            - kubeProxyReplacement=true
            - --helm-set=securityContext.capabilities.ciliumAgent={CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}
            - --helm-set=securityContext.capabilities.cleanCiliumState={NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}
            - --helm-set=cgroup.autoMount.enabled=false
            - --helm-set=cgroup.hostRoot=/sys/fs/cgroup
            - --helm-set=k8sServiceHost=localhost
            - --helm-set=k8sServicePort=7445
remodel/tofu/talos-k8s/image.tf (new file, 12 lines)
@@ -0,0 +1,12 @@
resource "proxmox_virtual_environment_download_file" "talos_nocloud_image" {
  for_each = toset(var.host_machines)

  node_name    = each.key
  content_type = "iso"
  datastore_id = var.proxmox_node.image_datastore

  file_name               = "talos-${var.talos_image.version}-nocloud-amd64.img"
  url                     = "${var.talos_image.base_url}/${var.talos_image.version}/nocloud-amd64.raw.gz"
  decompression_algorithm = "gz"
  overwrite               = false
}
@@ -0,0 +1,11 @@ (new file; the path is not captured here, but the content is the machine-config/control-plane.yaml.tftpl template referenced from talos-config.tf)
machine:
  network:
    hostname: ${hostname}
cluster:
  allowSchedulingOnControlPlanes: true
  network:
    cni:
      name: none
  proxy:
    disabled: true
  ${inlineManifests}
@@ -0,0 +1,7 @@ (new file; path not captured here)
machine:
  customization:
    systemExtensions:
      officialExtensions:
        - siderolabs/i915-ucode
        - siderolabs/intel-ucode
        - siderolabs/qemu-guest-agent
remodel/tofu/talos-k8s/machine-config/worker.yaml.tftpl (new file, 3 lines)
@@ -0,0 +1,3 @@
machine:
  network:
    hostname: ${hostname}
remodel/tofu/talos-k8s/main.tf (new file, 45 lines)
@@ -0,0 +1,45 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.60.0"
    }
    talos = {
      source  = "siderolabs/talos"
      version = "0.5.0"
    }
  }
}

provider "proxmox" {
  endpoint = var.proxmox_node.endpoint
  insecure = var.proxmox_node.insecure

  api_token = var.proxmox_node.api_token
  ssh {
    agent    = true
    username = var.proxmox_node.username
  }
}

output "talosconfig" {
  value     = data.talos_client_configuration.talosconfig.talos_config
  sensitive = true
}

output "kubeconfig" {
  value     = data.talos_cluster_kubeconfig.kubeconfig
  sensitive = true
}

resource "local_file" "talos-config" {
  content         = data.talos_client_configuration.talosconfig.talos_config
  filename        = "output/talos-config.yaml"
  file_permission = "0600"
}

resource "local_file" "kube-config" {
  content         = data.talos_cluster_kubeconfig.kubeconfig.kubeconfig_raw
  filename        = "output/kube-config.yaml"
  file_permission = "0600"
}
remodel/tofu/talos-k8s/proxmox-rbac.tf (new file, 19 lines)
@@ -0,0 +1,19 @@
resource "proxmox_virtual_environment_role" "csi" {
  role_id = "csi"
  privileges = [
    "VM.Audit",
    "VM.Config.Disk",
    "Datastore.Allocate",
    "Datastore.AllocateSpace",
    "Datastore.Audit"
  ]
}

resource "proxmox_virtual_environment_user" "kubernetes-csi" {
  user_id = "kubernetes-csi@pve"
  acl {
    path      = "/"
    propagate = true
    role_id   = proxmox_virtual_environment_role.csi.role_id
  }
}
remodel/tofu/talos-k8s/talos-config.tf (new file, 86 lines)
@@ -0,0 +1,86 @@
resource "talos_machine_secrets" "machine_secrets" {
  talos_version = var.cluster.talos_version
}

data "talos_client_configuration" "talosconfig" {
  cluster_name         = var.cluster.name
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  endpoints            = [for k, v in var.node_data.controlplanes : v.ip]
}

data "talos_machine_configuration" "control-plane" {
  for_each         = var.node_data.controlplanes
  cluster_name     = var.cluster.name
  cluster_endpoint = var.cluster.endpoint
  machine_type     = "controlplane"
  machine_secrets  = talos_machine_secrets.machine_secrets.machine_secrets
  talos_version    = var.cluster.talos_version
  config_patches = [
    templatefile("${path.module}/machine-config/control-plane.yaml.tftpl", {
      hostname = each.key
      inlineManifests = indent(2,
        yamlencode(
          {
            inlineManifests : [
              {
                name : "cilium-bootstrap",
                contents : file("${path.module}/bootstrap/cilium-install.yaml")
              }
            ]
          }))
    })
  ]
}

resource "talos_machine_configuration_apply" "ctrl_config_apply" {
  depends_on                  = [proxmox_virtual_environment_vm.controlplane]
  for_each                    = var.node_data.controlplanes
  node                        = each.value.ip
  client_configuration        = talos_machine_secrets.machine_secrets.client_configuration
  machine_configuration_input = data.talos_machine_configuration.control-plane[each.key].machine_configuration
}

data "talos_machine_configuration" "worker" {
  for_each         = var.node_data.workers
  cluster_name     = var.cluster.name
  cluster_endpoint = var.cluster.endpoint
  machine_type     = "worker"
  machine_secrets  = talos_machine_secrets.machine_secrets.machine_secrets
  talos_version    = var.cluster.talos_version
  config_patches = [
    templatefile("${path.module}/machine-config/worker.yaml.tftpl", {
      hostname = each.key
    })
  ]
}

resource "talos_machine_configuration_apply" "worker_config_apply" {
  depends_on                  = [proxmox_virtual_environment_vm.workers]
  for_each                    = var.node_data.workers
  node                        = each.value.ip
  client_configuration        = talos_machine_secrets.machine_secrets.client_configuration
  machine_configuration_input = data.talos_machine_configuration.worker[each.key].machine_configuration
}

resource "talos_machine_bootstrap" "bootstrap" {
  depends_on           = [talos_machine_configuration_apply.ctrl_config_apply]
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  node                 = [for k, v in var.node_data.controlplanes : v.ip][0]
}

data "talos_cluster_health" "health" {
  depends_on           = [talos_machine_configuration_apply.ctrl_config_apply]
  client_configuration = data.talos_client_configuration.talosconfig.client_configuration
  control_plane_nodes  = [for k, v in var.node_data.controlplanes : v.ip]
  worker_nodes         = [for k, v in var.node_data.workers : v.ip]
  endpoints            = data.talos_client_configuration.talosconfig.endpoints
  timeouts = {
    read = "10m"
  }
}

data "talos_cluster_kubeconfig" "kubeconfig" {
  depends_on           = [talos_machine_bootstrap.bootstrap, data.talos_cluster_health.health]
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  node                 = [for k, v in var.node_data.controlplanes : v.ip][0]
}
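To make the control-plane patch above easier to follow: indent(2, yamlencode(...)) turns the manifest list into a YAML fragment indented so that it nests under cluster: in the template. The rendered patch for one node therefore looks roughly like the sketch below (the hostname is a hypothetical map key, yamlencode quoting and key ordering are approximated, and the manifest contents are truncated):

machine:
  network:
    hostname: ctrl-00            # each.key of the control-plane map
cluster:
  allowSchedulingOnControlPlanes: true
  network:
    cni:
      name: none
  proxy:
    disabled: true
  inlineManifests:
    - name: cilium-bootstrap
      contents: |
        ---
        apiVersion: rbac.authorization.k8s.io/v1
        kind: ClusterRoleBinding
        ...                      # the full bootstrap/cilium-install.yaml is inlined here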
remodel/tofu/talos-k8s/variables.tf (new file, 53 lines)
@@ -0,0 +1,53 @@
variable "proxmox_node" {
  type = object({
    name            = string
    endpoint        = string
    insecure        = bool
    username        = string
    api_token       = string
    image_datastore = string
  })
  sensitive = true
}

variable "talos_image" {
  type = object({
    base_url = string
    version  = string
  })
}

variable "host_machines" {
  type = list(string)
}

variable "cluster" {
  type = object({
    name          = string
    endpoint      = string
    talos_version = string
  })
}

variable "node_data" {
  description = "A map of node data"
  type = object({
    controlplanes = map(object({
      ip            = string
      mac_address   = string
      host_node     = string
      vm_id         = number
      cpu           = number
      ram_dedicated = number
      igpu          = optional(bool, false)
    }))
    workers = map(object({
      ip            = string
      mac_address   = string
      host_node     = string
      vm_id         = number
      cpu           = number
      ram_dedicated = number
    }))
  })
}
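As with the Home Assistant configuration, the cluster topology comes from a tfvars file. A sketch with hypothetical values (hosts, addresses, MAC addresses, versions, and the image base_url are placeholders; the real values are not part of this diff):

# terraform.tfvars — illustrative values only
proxmox_node = {
  name            = "pve1"
  endpoint        = "https://192.168.1.10:8006"
  insecure        = true
  username        = "root"
  api_token       = "root@pam!tofu=00000000-0000-0000-0000-000000000000"
  image_datastore = "local"
}

host_machines = ["pve1"]

talos_image = {
  # image.tf appends "<version>/nocloud-amd64.raw.gz" to base_url,
  # e.g. a Talos Image Factory path for a chosen schematic ID
  base_url = "https://factory.talos.dev/image/<schematic-id>"
  version  = "v1.7.4"
}

cluster = {
  name          = "homelab"
  endpoint      = "https://192.168.1.100:6443"
  talos_version = "v1.7.4"
}

node_data = {
  controlplanes = {
    "ctrl-00" = {
      ip            = "192.168.1.100"
      mac_address   = "BC:24:11:00:00:01"
      host_node     = "pve1"
      vm_id         = 800
      cpu           = 4
      ram_dedicated = 8192
      igpu          = true
    }
  }
  workers = {
    "work-00" = {
      ip            = "192.168.1.110"
      mac_address   = "BC:24:11:00:00:02"
      host_node     = "pve1"
      vm_id         = 810
      cpu           = 4
      ram_dedicated = 8192
    }
  }
}

The map keys (ctrl-00, work-00 above) become both the VM names and the Talos hostnames, since vm-controlplane.tf, vm-workers.tf, and the machine-config templates all use each.key.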
remodel/tofu/talos-k8s/vm-controlplane.tf (new file, 77 lines)
@@ -0,0 +1,77 @@
resource "proxmox_virtual_environment_vm" "controlplane" {
  for_each = var.node_data.controlplanes

  node_name = each.value.host_node

  name        = each.key
  description = "Talos Kubernetes Control Plane"
  tags        = ["k8s", "control-plane"]
  on_boot     = true
  vm_id       = each.value.vm_id

  machine       = "q35"
  scsi_hardware = "virtio-scsi-single"
  bios          = "seabios"

  agent {
    enabled = true
  }

  cpu {
    cores = each.value.cpu
    type  = "host"
  }

  memory {
    dedicated = each.value.ram_dedicated
  }

  network_device {
    bridge      = "vmbr0"
    mac_address = each.value.mac_address
  }

  disk {
    datastore_id = "local-zfs"
    interface    = "scsi0"
    iothread     = true
    cache        = "writethrough"
    discard      = "on"
    ssd          = true
    file_id      = proxmox_virtual_environment_download_file.talos_nocloud_image[each.value.host_node].id
    file_format  = "raw"
    size         = 20
  }

  boot_order = ["scsi0"]

  operating_system {
    type = "l26" # Linux Kernel 2.6 - 6.X.
  }

  initialization {
    datastore_id = "local-zfs"
    # meta_data_file_id = proxmox_virtual_environment_file.controlplane-config[each.key].id
    ip_config {
      ipv4 {
        address = "${each.value.ip}/24"
        gateway = "192.168.1.1"
      }
      ipv6 {
        address = "dhcp"
      }
    }
  }

  dynamic "hostpci" {
    for_each = each.value.igpu ? [1] : []
    content {
      # Passthrough iGPU
      device  = "hostpci0"
      mapping = "iGPU"
      pcie    = true
      rombar  = true
      xvga    = false
    }
  }
}
remodel/tofu/talos-k8s/vm-workers.tf (new file, 75 lines)
@@ -0,0 +1,75 @@
resource "proxmox_virtual_environment_vm" "workers" {
  for_each = var.node_data.workers

  node_name = each.value.host_node

  name        = each.key
  description = "Talos Kubernetes Worker"
  tags        = ["k8s", "worker"]
  on_boot     = true
  vm_id       = each.value.vm_id

  machine       = "q35"
  scsi_hardware = "virtio-scsi-single"
  bios          = "seabios"

  agent {
    enabled = true
  }

  cpu {
    cores = each.value.cpu
    type  = "host"
  }

  memory {
    dedicated = each.value.ram_dedicated
  }

  network_device {
    bridge      = "vmbr0"
    mac_address = each.value.mac_address
  }

  disk {
    datastore_id = "local-zfs"
    interface    = "scsi0"
    iothread     = true
    cache        = "writethrough"
    discard      = "on"
    ssd          = true
    file_id      = proxmox_virtual_environment_download_file.talos_nocloud_image[each.value.host_node].id
    file_format  = "raw"
    size         = 20
  }

  boot_order = ["scsi0"]

  operating_system {
    type = "l26" # Linux Kernel 2.6 - 6.X.
  }

  initialization {
    datastore_id = "local-zfs"
    # meta_data_file_id = proxmox_virtual_environment_file.worker-config[each.key].id
    ip_config {
      ipv4 {
        address = "${each.value.ip}/24"
        gateway = "192.168.1.1"
      }
      ipv6 {
        address = "dhcp"
      }
    }
  }

  # hostpci {
  #   # Passthrough iGPU
  #   device  = "hostpci0"
  #   #id     = "0000:00:02"
  #   mapping = "iGPU"
  #   pcie    = true
  #   rombar  = true
  #   xvga    = false
  # }
}