bootstrap full cluster

This commit is contained in:
Serge Logvinov
2023-04-04 16:26:00 +03:00
parent daf0841801
commit 582e8cc035
13 changed files with 194 additions and 28 deletions

View File

@@ -6,6 +6,7 @@ help:
@awk 'BEGIN {FS = ":.*?## "} /^[0-9a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
create-config: ## Generate Talos configs
terraform apply -auto-approve -target=local_file.worker_patch
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false --config-patch-worker @templates/worker.patch.yaml ${CLUSTERNAME} https://${ENDPOINT}:6443
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
@@ -30,7 +31,7 @@ create-controlplane-bootstrap:
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.48 bootstrap
create-controlplane: ## Bootstrap first controlplane node
terraform apply -auto-approve -target=hcloud_server.controlplane
terraform apply -auto-approve -target=local_file.worker_patch
create-kubeconfig: ## Prepare kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.48 kubeconfig .
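A quick way to sanity-check the result of `create-kubeconfig` (a sketch only: it assumes the kubeconfig lands in the repository root, as the `.` argument above suggests, and reuses the 172.16.0.48 node address from this Makefile):
```shell
# node IP taken from the Makefile targets above; kubeconfig path assumed to be ./kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.48 health
kubectl --kubeconfig kubeconfig get nodes -o wide
```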

View File

@@ -17,3 +17,7 @@ cicustom: user=local:snippets/VMID.yaml,meta=local:snippets/VMID.meta
ipconfig0: ...
net0: ...
```
```shell
make create-config create-templates
```
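The overall bootstrap order implied by the Makefile targets touched in this commit might look like the following; the exact sequence is an assumption based on the targets shown, not documented behaviour:
```shell
make create-config create-templates    # generate Talos configs and render templates
make create-controlplane-bootstrap     # first controlplane node + talosctl bootstrap
make create-controlplane               # apply the remaining controlplane resources
make create-kubeconfig                 # fetch the admin kubeconfig via talosctl
```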

View File

@@ -1,11 +1,13 @@
resource "local_file" "worker_patch" {
content = templatefile("${path.module}/templates/worker.yaml.tpl",
content = templatefile("${path.module}/templates/worker.patch.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.ipv4_vip
nodeSubnets = var.vpc_main_cidr
labels = "project.io/node-pool=worker"
})
)
filename = "${path.module}/templates/worker.patch.yaml"
filename = "${path.module}/templates/worker.patch.yaml.tpl"
file_permission = "0600"
}

View File

@@ -10,6 +10,7 @@ locals {
node_name : zone
cpu : lookup(try(var.controlplane[zone], {}), "cpu", 1)
mem : lookup(try(var.controlplane[zone], {}), "mem", 2048)
ip0 : lookup(try(var.controlplane[zone], {}), "ip0", "ip6=auto")
ipv4 : "${cidrhost(local.controlplane_subnet, index(local.zones, zone) + inx)}/24"
gwv4 : local.gwv4
}
@@ -52,7 +53,7 @@ resource "proxmox_vm_qemu" "controlplane" {
define_connection_info = false
os_type = "ubuntu"
qemu_os = "l26"
ipconfig0 = "ip6=auto"
ipconfig0 = each.value.ip0
ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
cicustom = "meta=local:snippets/${each.value.name}.metadata.yaml"
cloudinit_cdrom_storage = var.proxmox_storage
@@ -122,10 +123,10 @@ resource "local_file" "controlplane" {
file_permission = "0600"
}
resource "null_resource" "controlplane" {
for_each = local.controlplanes
provisioner "local-exec" {
command = "echo talosctl apply-config --insecure --nodes ${split("/", each.value.ipv4)[0]} --config-patch @_cfgs/${each.value.name}.yaml --file _cfgs/controlplane.yaml"
}
depends_on = [proxmox_vm_qemu.controlplane, local_file.controlplane]
}
# resource "null_resource" "controlplane" {
# for_each = local.controlplanes
# provisioner "local-exec" {
# command = "sleep 60 && talosctl apply-config --insecure --nodes ${split("/", each.value.ipv4)[0]} --config-patch @_cfgs/${each.value.name}.yaml --file _cfgs/controlplane.yaml"
# }
# depends_on = [proxmox_vm_qemu.controlplane, local_file.controlplane]
# }
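Note that the active provisioner above only `echo`es the command; applying the config to a node is left to the operator. A hedged example for the first node, with the per-node name as a placeholder:
```shell
# <name> is the per-node file rendered by local_file.controlplane;
# 172.16.0.48 is the first controlplane address used in the Makefile
talosctl apply-config --insecure --nodes 172.16.0.48 \
  --config-patch @_cfgs/<name>.yaml --file _cfgs/controlplane.yaml
```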

View File

@@ -12,6 +12,7 @@ locals {
node_name : zone
cpu : lookup(try(var.instances[zone], {}), "web_cpu", 1)
mem : lookup(try(var.instances[zone], {}), "web_mem", 2048)
ip0 : lookup(try(var.instances[zone], {}), "web_ip0", "ip6=auto")
ipv4 : "${cidrhost(local.subnets[zone], inx)}/24"
gwv4 : local.gwv4
}
@@ -28,12 +29,19 @@ resource "null_resource" "web_machineconfig" {
}
provisioner "file" {
source = "${path.module}/_cfgs/worker.yaml"
# source = "${path.module}/_cfgs/worker.yaml"
content = templatefile("${path.module}/templates/web.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.ipv4_vip
nodeSubnets = var.vpc_main_cidr
labels = local.web_labels
}))
destination = "/var/lib/vz/snippets/${local.web_prefix}.yaml"
}
triggers = {
params = filemd5("${path.module}/_cfgs/worker.yaml")
params = filemd5("${path.module}/templates/web.yaml.tpl")
}
}
@@ -72,7 +80,7 @@ resource "proxmox_vm_qemu" "web" {
define_connection_info = false
os_type = "ubuntu"
qemu_os = "l26"
ipconfig0 = "ip6=auto"
ipconfig0 = each.value.ip0
ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
cicustom = "user=local:snippets/${local.web_prefix}.yaml,meta=local:snippets/${each.value.name}.metadata.yaml"
cloudinit_cdrom_storage = var.proxmox_storage
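Since the rendered machine config is shipped to the Proxmox node as a cloud-init snippet and attached via `cicustom`, it can be verified on the hypervisor itself (VM ID and snippet name are placeholders):
```shell
# run on the Proxmox host backing the 'local' storage
ls -l /var/lib/vz/snippets/
qm config <VMID> | grep cicustom
```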

View File

@@ -12,6 +12,7 @@ locals {
node_name : zone
cpu : lookup(try(var.instances[zone], {}), "worker_cpu", 1)
mem : lookup(try(var.instances[zone], {}), "worker_mem", 2048)
ip0 : lookup(try(var.instances[zone], {}), "worke_ip0", "ip6=auto")
ipv4 : "${cidrhost(local.subnets[zone], 4 + inx)}/24"
gwv4 : local.gwv4
}
@@ -28,12 +29,17 @@ resource "null_resource" "worker_machineconfig" {
}
provisioner "file" {
source = "${path.module}/_cfgs/worker.yaml"
content = templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.ipv4_vip
nodeSubnets = var.vpc_main_cidr
labels = local.worker_labels
}))
destination = "/var/lib/vz/snippets/${local.worker_prefix}.yaml"
}
triggers = {
params = filemd5("${path.module}/_cfgs/worker.yaml")
params = filemd5("${path.module}/templates/worker.yaml.tpl")
}
}
@@ -146,7 +152,7 @@ resource "proxmox_vm_qemu" "worker" {
define_connection_info = false
os_type = "ubuntu"
qemu_os = "l26"
ipconfig0 = "ip6=auto"
ipconfig0 = each.value.ip0
ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
cicustom = "user=local:snippets/${local.worker_prefix}.yaml,meta=local:snippets/${each.value.name}.metadata.yaml"
cloudinit_cdrom_storage = var.proxmox_storage
@@ -186,10 +192,19 @@ resource "proxmox_vm_qemu" "worker" {
ssd = 1
backup = false
}
disk {
type = "scsi"
storage = var.proxmox_storage
size = "128G"
cache = "none"
ssd = 1
backup = false
}
lifecycle {
ignore_changes = [
boot,
disk,
network,
desc,
numa,

View File

@@ -4,6 +4,14 @@ output "controlplane_endpoint" {
value = local.ipv4_vip
}
output "controlplane_apply" {
description = "Kubernetes controlplane apply command"
value = [for cp in local.controlplanes :
"talosctl apply-config --insecure --nodes ${split("/", cp.ipv4)[0]} --config-patch @_cfgs/${cp.name}.yaml --file _cfgs/controlplane.yaml"
]
depends_on = [proxmox_vm_qemu.controlplane]
}
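Because `controlplane_apply` renders the full `talosctl apply-config` command per node, the commands can be read back from state after an apply, for example:
```shell
# print the generated per-node commands
terraform output controlplane_apply
# or execute them directly (requires jq); running them is an assumption,
# the output itself only lists the commands
terraform output -json controlplane_apply | jq -r '.[]' | sh
```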
output "controlplane_nodes" {
description = "Kubernetes controlplane nodes"
value = [

View File

@@ -48,6 +48,8 @@ machine:
allowedKubernetesNamespaces:
- kube-system
cluster:
adminKubeconfig:
certLifetime: 8h0m0s
controlPlane:
endpoint: https://${apiDomain}:6443
network:
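With `certLifetime: 8h0m0s`, the admin kubeconfig expires after eight hours and has to be re-fetched; re-running the kubeconfig step from the Makefile is one way to renew it:
```shell
# the admin kubeconfig is only valid for 8h; re-run the kubeconfig target to refresh it
make create-kubeconfig
```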

View File

@@ -0,0 +1,58 @@
version: v1alpha1
debug: false
persist: true
machine:
type: worker
token: ${tokenMachine}
ca:
crt: ${caMachine}
kubelet:
extraArgs:
cloud-provider: external
rotate-server-certificates: true
node-labels: "${labels}"
clusterDNS:
- 169.254.2.53
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
nodeIP:
validSubnets: ${format("%#v",split(",",nodeSubnets))}
network:
interfaces:
- interface: dummy0
addresses:
- 169.254.2.53/32
extraHostEntries:
- ip: ${lbv4}
aliases:
- ${apiDomain}
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
systemDiskEncryption:
state:
provider: luks2
keys:
- nodeID: {}
slot: 0
ephemeral:
provider: luks2
keys:
- nodeID: {}
slot: 0
options:
- no_read_workqueue
- no_write_workqueue
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
discovery:
enabled: true
network:
dnsDomain: ${domain}
serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
token: ${token}
ca:
crt: ${ca}
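Once rendered with real values, a config built from this template can be checked before it is handed to a node; `--mode cloud` is an assumption for cloud-init based Proxmox VMs:
```shell
# <rendered>.yaml is a placeholder for the file produced from this template
talosctl validate --config <rendered>.yaml --mode cloud
```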

View File

@@ -0,0 +1,45 @@
machine:
kubelet:
extraArgs:
cloud-provider: external
rotate-server-certificates: true
node-labels: "project.io/node-pool=worker"
clusterDNS:
- 169.254.2.53
- 10.200.0.10
nodeIP:
validSubnets: ["172.16.0.0/24"]
network:
interfaces:
- interface: dummy0
addresses:
- 169.254.2.53/32
extraHostEntries:
- ip: 172.16.0.10
aliases:
- api.cluster.local
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
systemDiskEncryption:
state:
provider: luks2
options:
- no_read_workqueue
- no_write_workqueue
keys:
- nodeID: {}
slot: 0
ephemeral:
provider: luks2
options:
- no_read_workqueue
- no_write_workqueue
keys:
- nodeID: {}
slot: 0
cluster:
controlPlane:
endpoint: https://api.cluster.local:6443
proxy:
disabled: true
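This patch appears to be what the `create-config` target passes to `talosctl gen config`; the call below is lifted from the Makefile in this commit, with the cluster name and endpoint left as the Makefile variables:
```shell
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false \
  --config-patch-worker @templates/worker.patch.yaml ${CLUSTERNAME} https://${ENDPOINT}:6443
```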

View File

@@ -1,8 +1,16 @@
version: v1alpha1
debug: false
persist: true
machine:
type: worker
token: ${tokenMachine}
ca:
crt: ${caMachine}
kubelet:
extraArgs:
cloud-provider: external
rotate-server-certificates: true
node-labels: "${labels}"
clusterDNS:
- 169.254.2.53
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
@@ -23,22 +31,32 @@ machine:
systemDiskEncryption:
state:
provider: luks2
options:
- no_read_workqueue
- no_write_workqueue
keys:
- nodeID: {}
slot: 0
ephemeral:
provider: luks2
options:
- no_read_workqueue
- no_write_workqueue
keys:
- nodeID: {}
slot: 0
options:
- no_read_workqueue
- no_write_workqueue
disks:
- device: /dev/sdb
partitions:
- mountpoint: /var/data
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${apiDomain}:6443
proxy:
disabled: true
clusterName: ${clusterName}
discovery:
enabled: true
network:
dnsDomain: ${domain}
serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
token: ${token}
ca:
crt: ${ca}
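The `disks` stanza likely pairs with the extra 128G scsi disk added to `proxmox_vm_qemu.worker` above: assuming the second virtual disk appears as `/dev/sdb`, it gets mounted at `/var/data`. A hedged check after a worker has joined (worker IP is a placeholder; subcommand names as in talosctl of this era):
```shell
talosctl --talosconfig _cfgs/talosconfig --nodes <worker-ip> disks
talosctl --talosconfig _cfgs/talosconfig --nodes <worker-ip> mounts | grep /var/data
```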

View File

@@ -81,12 +81,14 @@ variable "controlplane" {
count = 0,
cpu = 2,
mem = 4096,
# ip0 = "ip6=1:2::3/64,gw6=1:2::1"
},
"node2" = {
id = 510
count = 0,
cpu = 2,
mem = 4096,
# ip0 = "ip6=dhcp",
}
}
}
@@ -100,10 +102,12 @@ variable "instances" {
web_count = 0,
web_cpu = 2,
web_mem = 4096,
web_ip0 = "", # ip=dhcp,ip6=dhcp
worker_id = 1050
worker_count = 0,
worker_cpu = 2,
worker_mem = 4096,
worker_ip0 = "", # ip=dhcp,ip6=dhcp
},
"node2" = {
web_id = 2000

View File

@@ -42,9 +42,9 @@ source "proxmox" "talos" {
cpu_type = "host"
memory = 3072
vga {
type = "serial0"
}
# vga {
# type = "serial0"
# }
serials = ["socket"]
ssh_username = "root"
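Commenting out the `vga { type = "serial0" }` block and adding `serials = ["socket"]` switches the template VM to a socket-backed serial port; on the Proxmox host that console can then be reached with `qm terminal` (VM ID is a placeholder):
```shell
qm terminal <VMID>
```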