full setup

This commit is contained in:
Serge Logvinov
2023-04-15 21:37:45 +03:00
parent 27ad8a011c
commit c79bb4e511
18 changed files with 499 additions and 325 deletions

View File

@@ -25,10 +25,10 @@ Having a single Kubernetes control plane that spans multiple cloud providers can
| [Azure](azure) | 1.3.4 | CCM,CSI,Autoscaler | many regions, many zones | ✓ | ✓ |
| [Exoscale](exoscale) | 1.3.0 | CCM,Autoscaler | many regions | ✗ | |
| [GCP](gcp-zonal) | 1.3.4 | CCM,CSI,Autoscaler | one region, many zones | ✓ | ✓ |
| [Hetzner](hetzner) | 1.3.4 | CCM,CSI,Autoscaler | many regions | ✗ | ✓ |
| [Hetzner](hetzner) | 1.4.0 | CCM,CSI,Autoscaler | many regions, one network zone | ✗ | ✓ |
| [Openstack](openstack) | 1.3.4 | CCM,CSI | many regions, many zones | ✓ | ✓ |
| [Oracle](oracle) | 1.3.4 | CCM,~~CSI~~,Autoscaler | one region, many zones | ✓ | ✓ |
| [Proxmox](proxmox) | 1.3.4 | TalosCCM | one region, one zone | ✓ | ✓ |
| [Proxmox](proxmox) | 1.3.4 | CCM | one region, one zone | ✓ | ✓ |
| [Scaleway](scaleway) | 1.3.4 | CCM,CSI | one region | ✓ | ✓ |
## Known issues

View File

@@ -1,14 +1,18 @@
ENDPOINT:=api.cluster.local
# ENDPOINT:=${shell terraform output -raw controlplane_endpoint 2>/dev/null}
CLUSTERNAME:="talos-k8s-hetzner"
CLUSTERNAME := "talos-k8s-hetzner"
CPFIRST := ${shell terraform output -raw controlplane_firstnode 2>/dev/null}
ENDPOINT := ${shell terraform output -raw controlplane_endpoint 2>/dev/null}
ifneq (,$(findstring Warning,${ENDPOINT}))
ENDPOINT := api.cluster.local
endif
help:
@awk 'BEGIN {FS = ":.*?## "} /^[0-9a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
clean: ## Clean all
terraform destroy -auto-approve
rm -rf _cfgs
rm -f kubeconfig
rm -f kubeconfig terraform.tfvars.json
prepare:
@[ -f ~/.ssh/terraform ] || ssh-keygen -f ~/.ssh/terraform -N '' -t rsa
@@ -23,10 +27,8 @@ create-config: ## Generate talos configs
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
create-templates:
@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
@echo 'podSubnets: "10.32.0.0/12,fd40:10:32::/102"' > _cfgs/tfstate.vars
@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
@echo 'nodeSubnets: "172.16.0.0/12"' >> _cfgs/tfstate.vars
@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars
@@ -40,8 +42,8 @@ create-templates:
@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json
create-controlplane-bootstrap:
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 bootstrap
talosctl --talosconfig _cfgs/talosconfig config endpoint ${CPFIRST}
talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} bootstrap
create-controlplane: ## Bootstrap first controlplane node
terraform apply -auto-approve -target=hcloud_server.controlplane
@@ -51,16 +53,10 @@ create-infrastructure: ## Bootstrap all nodes
terraform apply
create-kubeconfig: ## Prepare kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig .
talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} kubeconfig .
kubectl --kubeconfig=kubeconfig config set clusters.${CLUSTERNAME}.server https://${ENDPOINT}:6443
kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
create-deployments:
helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.4.0 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
create-secrets:
dd if=/dev/urandom bs=1 count=16 2>/dev/null | hexdump -e '"%00x"' > hcloud-csi-secret.secret
kubectl --kubeconfig=kubeconfig create secret generic hcloud-csi-secret --from-file=encryptionPassphrase=hcloud-csi-secret.secret

View File

@@ -155,7 +155,8 @@ Cluster Autoscaler for [Hetzner Cloud](https://github.com/kubernetes/autoscaler/
Create/deploy autoscaler:
```shell
kubectl -n kube-system create secret generic hcloud-init --from-file=worker=_cfgs/worker-as.yaml.base64 --from-literal=ssh-key=${SSHID} --from-literal=image=${IMAGEID}
cat _cfgs/worker-as.yaml | base64 > _cfgs/worker-as.yaml.base64
kubectl -n kube-system create secret generic hcloud-init --from-file=worker=_cfgs/worker-as.yaml.base64 --from-literal=image="os=talos"
kubectl apply -f deployments/hcloud-autoscaler.yaml
```

View File

@@ -139,14 +139,14 @@ spec:
spec:
serviceAccountName: cluster-autoscaler
nodeSelector:
node-role.kubernetes.io/control-plane: ""
# node-role.kubernetes.io/control-plane: ""
node.cloudprovider.kubernetes.io/platform: hcloud
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
containers:
- name: cluster-autoscaler
image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.25.0
image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
# image: ghcr.io/sergelogvinov/cluster-autoscaler-amd64:dev
name: cluster-autoscaler
resources:
@@ -167,7 +167,7 @@ spec:
- --nodes=0:2:CPX31:NBG1:worker-nbg1
- --nodes=0:2:CPX31:FSN1:worker-fsn1
- --nodes=0:2:CPX31:HEL1:worker-hel1
- --v=1
- --v=2
env:
- name: HCLOUD_TOKEN
valueFrom:
@@ -179,6 +179,11 @@ spec:
secretKeyRef:
name: hcloud
key: network
- name: HCLOUD_SSH_KEY
valueFrom:
secretKeyRef:
name: hcloud
key: sshkey
- name: HCLOUD_IMAGE
valueFrom:
secretKeyRef:
@@ -189,8 +194,3 @@ spec:
secretKeyRef:
name: hcloud-init
key: worker
- name: HCLOUD_SSH_KEY
valueFrom:
secretKeyRef:
name: hcloud-init
key: ssh-key

View File

@@ -1,15 +1,4 @@
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi.hetzner.cloud
spec:
attachRequired: true
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
fsGroupPolicy: File
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
@@ -190,17 +179,46 @@ subjects:
name: hcloud-csi-controller
namespace: kube-system
---
kind: StatefulSet
apiVersion: v1
kind: Service
metadata:
labels:
app: hcloud-csi-controller
name: hcloud-csi-controller-metrics
namespace: kube-system
spec:
ports:
- name: metrics
port: 9189
targetPort: metrics
selector:
app: hcloud-csi-controller
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hcloud-csi
name: hcloud-csi-node-metrics
namespace: kube-system
spec:
ports:
- name: metrics
port: 9189
targetPort: metrics
selector:
app: hcloud-csi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: hcloud-csi-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: hcloud-csi-controller
serviceName: hcloud-csi-controller
replicas: 1
template:
metadata:
labels:
@@ -212,102 +230,84 @@ spec:
tolerations:
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
serviceAccount: hcloud-csi-controller
containers:
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
volumeMounts:
- name: socket-dir
mountPath: /run/csi
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
- name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
volumeMounts:
- name: socket-dir
mountPath: /run/csi
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
args:
- --feature-gates=Topology=true
- --default-fstype=ext4
volumeMounts:
- name: socket-dir
mountPath: /run/csi
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
- name: hcloud-csi-driver
image: hetznercloud/hcloud-csi-driver:2.0.0
imagePullPolicy: Always
command:
- /bin/hcloud-csi-driver-controller
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: 0.0.0.0:9189
- name: ENABLE_METRICS
value: "true"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: HCLOUD_TOKEN
valueFrom:
secretKeyRef:
name: hcloud
key: token
volumeMounts:
- name: socket-dir
mountPath: /run/csi
ports:
- containerPort: 9189
name: metrics
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 2
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
- name: liveness-probe
imagePullPolicy: Always
image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
volumeMounts:
- mountPath: /run/csi
name: socket-dir
- args:
- --default-fstype=ext4
image: registry.k8s.io/sig-storage/csi-attacher:v4.1.0
name: csi-attacher
volumeMounts:
- mountPath: /run/csi
name: socket-dir
- image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0
name: csi-resizer
volumeMounts:
- mountPath: /run/csi
name: socket-dir
- args:
- --feature-gates=Topology=true
- --default-fstype=ext4
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
name: csi-provisioner
volumeMounts:
- mountPath: /run/csi
name: socket-dir
- command:
- /bin/hcloud-csi-driver-controller
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: 0.0.0.0:9189
- name: ENABLE_METRICS
value: "true"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: HCLOUD_TOKEN
valueFrom:
secretKeyRef:
key: token
name: hcloud
image: hetznercloud/hcloud-csi-driver:2.2.0
imagePullPolicy: Always
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 2
timeoutSeconds: 3
name: hcloud-csi-driver
ports:
- containerPort: 9189
name: metrics
- containerPort: 9808
name: healthz
protocol: TCP
volumeMounts:
- mountPath: /run/csi
name: socket-dir
- image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
imagePullPolicy: Always
name: liveness-probe
volumeMounts:
- mountPath: /run/csi
name: socket-dir
serviceAccountName: hcloud-csi-controller
volumes:
- name: socket-dir
emptyDir: {}
- emptyDir: {}
name: socket-dir
---
kind: DaemonSet
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: hcloud-csi-node
namespace: kube-system
labels:
app: hcloud-csi
name: hcloud-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
@@ -323,110 +323,89 @@ spec:
- effect: NoSchedule
operator: Exists
containers:
- name: csi-node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
args:
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /run/csi
- name: registration-dir
mountPath: /registration
securityContext:
privileged: true
- name: hcloud-csi-driver
image: hetznercloud/hcloud-csi-driver:2.1.0
imagePullPolicy: Always
command:
- /bin/hcloud-csi-driver-node
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: 0.0.0.0:9189
- name: ENABLE_METRICS
value: "true"
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /run/csi
- name: device-dir
mountPath: /dev
securityContext:
privileged: true
ports:
- containerPort: 9189
name: metrics
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 2
- name: liveness-probe
imagePullPolicy: Always
image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
volumeMounts:
- mountPath: /run/csi
name: plugin-dir
- args:
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0
name: csi-node-driver-registrar
volumeMounts:
- mountPath: /run/csi
name: plugin-dir
- mountPath: /registration
name: registration-dir
- command:
- /bin/hcloud-csi-driver-node
env:
- name: CSI_ENDPOINT
value: unix:///run/csi/socket
- name: METRICS_ENDPOINT
value: 0.0.0.0:9189
- name: ENABLE_METRICS
value: "true"
image: hetznercloud/hcloud-csi-driver:2.2.0
imagePullPolicy: Always
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
periodSeconds: 2
timeoutSeconds: 3
name: hcloud-csi-driver
ports:
- containerPort: 9189
name: metrics
- containerPort: 9808
name: healthz
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: kubelet-dir
- mountPath: /run/csi
name: plugin-dir
- mountPath: /dev
name: device-dir
- image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
imagePullPolicy: Always
name: liveness-probe
volumeMounts:
- mountPath: /run/csi
name: plugin-dir
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory
- hostPath:
path: /var/lib/kubelet
type: Directory
name: kubelet-dir
- hostPath:
path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
type: DirectoryOrCreate
name: plugin-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
name: registration-dir
- hostPath:
path: /dev
type: Directory
name: device-dir
---
apiVersion: v1
kind: Service
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: hcloud-csi-controller-metrics
namespace: kube-system
labels:
app: hcloud-csi
name: csi.hetzner.cloud
spec:
selector:
app: hcloud-csi-controller
ports:
- port: 9189
name: metrics
targetPort: metrics
---
apiVersion: v1
kind: Service
metadata:
name: hcloud-csi-node-metrics
namespace: kube-system
labels:
app: hcloud-csi
spec:
selector:
app: hcloud-csi
ports:
- port: 9189
name: metrics
targetPort: metrics
attachRequired: true
fsGroupPolicy: File
podInfoOnMount: true
volumeLifecycleModes:
- Persistent

View File

@@ -21,13 +21,15 @@ spec:
labels:
run: overprovisioning
spec:
# nodeSelector:
# node.cloudprovider.kubernetes.io/platform: hcloud
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node.kubernetes.io/instance-type
operator: Exists
# - key: node.kubernetes.io/instance-type
# operator: Exists
# - key: instance.hetzner.cloud/is-root-server
# operator: NotIn
# values:
@@ -50,3 +52,12 @@ spec:
resources:
requests:
cpu: "700m"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault

View File

@@ -1,11 +0,0 @@
resource "local_file" "worker" {
content = templatefile("${path.module}/modules/templates/worker-as.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.ipv4_vip
labels = "project.io/node-pool=worker,hcloud/node-group=worker-as"
})
)
filename = "_cfgs/worker-as.yaml"
file_permission = "0600"
}

View File

@@ -15,10 +15,6 @@ locals {
]) : k.name => k }
}
output "instances" {
value = local.controlplanes
}
resource "hcloud_server" "controlplane" {
for_each = local.controlplanes
location = each.value.region
@@ -81,19 +77,25 @@ resource "local_file" "controlplane" {
for_each = local.controlplanes
content = templatefile("${path.module}/templates/controlplane.yaml.tpl",
merge(var.kubernetes, {
{
name = each.value.name
apiDomain = var.kubernetes["apiDomain"]
domain = var.kubernetes["domain"]
podSubnets = var.kubernetes["podSubnets"]
serviceSubnets = var.kubernetes["serviceSubnets"]
ipv4_vip = local.ipv4_vip
ipv4_local = each.value.ip
lbv4_local = local.lbv4_local
lbv4 = local.lbv4
lbv6 = local.lbv6
nodeSubnets = hcloud_network_subnet.core.ip_range
hcloud_network = hcloud_network.main.id
hcloud_token = var.hcloud_token
hcloud_image = data.hcloud_image.talos["amd64"].id
hcloud_sshkey = hcloud_ssh_key.infra.id
robot_user = var.robot_user
robot_password = var.robot_password
})
}
)
filename = "_cfgs/${each.value.name}.yaml"
file_permission = "0600"

View File

@@ -1,22 +1,53 @@
module "web" {
source = "./modules/worker"
locals {
web_prefix = "web"
web_labels = "project.io/node-pool=web"
for_each = var.instances
location = each.key
labels = merge(var.tags, { label = "web" })
network = hcloud_network.main.id
subnet = hcloud_network_subnet.core.ip_range
vm_name = "web-${each.key}-"
vm_items = lookup(each.value, "web_count", 0)
vm_type = lookup(each.value, "web_type", "cx11")
vm_image = data.hcloud_image.talos.id
vm_ip_start = (3 + try(index(var.regions, each.key), 0)) * 10
vm_security_group = [hcloud_firewall.web.id]
vm_params = merge(var.kubernetes, {
lbv4 = local.ipv4_vip
labels = "project.io/node-pool=web,hcloud/node-group=web-${each.key}"
})
web = { for k in flatten([
for regions in var.regions : [
for inx in range(lookup(try(var.instances[regions], {}), "web_count", 0)) : {
name : "${local.web_prefix}-${regions}-${1 + inx}"
image : data.hcloud_image.talos[startswith(lookup(try(var.instances[regions], {}), "web_type", "cpx11"), "ca") ? "arm64" : "amd64"].id
region : regions
type : lookup(try(var.instances[regions], {}), "web_type", "cpx11")
ip : cidrhost(hcloud_network_subnet.core.ip_range, 40 + 10 * index(var.regions, regions) + inx)
}
]
]) : k.name => k }
}
resource "hcloud_server" "web" {
for_each = local.web
location = each.value.region
name = each.value.name
image = each.value.image
server_type = each.value.type
ssh_keys = [hcloud_ssh_key.infra.id]
keep_disk = true
labels = merge(var.tags, { label = "web" })
user_data = templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
name = each.value.name
ipv4 = each.value.ip
lbv4 = local.ipv4_vip
nodeSubnets = hcloud_network_subnet.core.ip_range
labels = "${local.web_labels},hcloud/node-group=web-${each.value.region}"
})
)
firewall_ids = [hcloud_firewall.web.id]
network {
network_id = hcloud_network.main.id
ip = each.value.ip
}
lifecycle {
ignore_changes = [
image,
server_type,
user_data,
ssh_keys,
]
}
}

View File

@@ -0,0 +1,13 @@
resource "local_sensitive_file" "worker-as" {
content = templatefile("${path.module}/templates/worker-as.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.ipv4_vip
nodeSubnets = var.vpc_main_cidr
labels = "project.io/node-pool=worker,hcloud/node-group=worker-as"
})
)
filename = "_cfgs/worker-as.yaml"
file_permission = "0600"
}

View File

@@ -0,0 +1,75 @@
locals {
worker_prefix = "worker"
worker_labels = "project.io/node-pool=worker"
worker = { for k in flatten([
for regions in var.regions : [
for inx in range(lookup(try(var.instances[regions], {}), "worker_count", 0)) : {
name : "${local.worker_prefix}-${regions}-${1 + inx}"
image : data.hcloud_image.talos[startswith(lookup(try(var.instances[regions], {}), "worker_type", "cpx11"), "ca") ? "arm64" : "amd64"].id
region : regions
type : lookup(try(var.instances[regions], {}), "worker_type", "cpx11")
ip : cidrhost(hcloud_network_subnet.core.ip_range, 80 + 10 * index(var.regions, regions) + inx)
}
]
]) : k.name => k }
}
resource "hcloud_server" "worker" {
for_each = local.worker
location = each.value.region
name = each.value.name
image = each.value.image
server_type = each.value.type
ssh_keys = [hcloud_ssh_key.infra.id]
keep_disk = true
labels = merge(var.tags, { label = "worker" })
user_data = templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
name = each.value.name
ipv4 = each.value.ip
lbv4 = local.ipv4_vip
nodeSubnets = hcloud_network_subnet.core.ip_range
labels = "${local.worker_labels},hcloud/node-group=worker-${each.value.region}"
})
)
firewall_ids = [hcloud_firewall.worker.id]
network {
network_id = hcloud_network.main.id
ip = each.value.ip
}
lifecycle {
ignore_changes = [
image,
server_type,
user_data,
ssh_keys,
]
}
}
# module "worker" {
# source = "./modules/worker"
# for_each = var.instances
# location = each.key
# labels = merge(var.tags, { label = "worker" })
# network = hcloud_network.main.id
# subnet = hcloud_network_subnet.core.ip_range
# vm_name = "worker-${each.key}-"
# vm_items = lookup(each.value, "worker_count", 0)
# vm_type = lookup(each.value, "worker_type", "cx11")
# vm_image = data.hcloud_image.talos.id
# vm_ip_start = (6 + try(index(var.regions, each.key), 0)) * 10
# vm_security_group = [hcloud_firewall.worker.id]
# vm_params = merge(var.kubernetes, {
# lbv4 = local.ipv4_vip
# labels = "project.io/node-pool=worker,hcloud/node-group=worker-${each.key}"
# })
# }

View File

@@ -1,22 +0,0 @@
module "worker" {
source = "./modules/worker"
for_each = var.instances
location = each.key
labels = merge(var.tags, { label = "worker" })
network = hcloud_network.main.id
subnet = hcloud_network_subnet.core.ip_range
vm_name = "worker-${each.key}-"
vm_items = lookup(each.value, "worker_count", 0)
vm_type = lookup(each.value, "worker_type", "cx11")
vm_image = data.hcloud_image.talos.id
vm_ip_start = (6 + try(index(var.regions, each.key), 0)) * 10
vm_security_group = [hcloud_firewall.worker.id]
vm_params = merge(var.kubernetes, {
lbv4 = local.ipv4_vip
labels = "project.io/node-pool=worker,hcloud/node-group=worker-${each.key}"
})
}

View File

@@ -7,20 +7,5 @@ output "controlplane_endpoint" {
output "controlplane_firstnode" {
description = "Kubernetes controlplane first node"
value = try(hcloud_server.controlplane[0].ipv4_address, "none")
}
output "controlplane_nodes" {
description = "Kubernetes controlplane nodes"
value = [
for s in hcloud_server.controlplane[*] :
{
name = s.name
ipv4_address = s.ipv4_address
ipv6_address = s.ipv6_address
zone = "hetzner"
location = s.location
params = ""
}
]
value = try(flatten([for c in hcloud_server.controlplane : c.ipv4_address])[0], "127.0.0.1")
}

View File

@@ -110,6 +110,7 @@ cluster:
user: ${base64encode(robot_user)}
password: ${base64encode(robot_password)}
image: ${base64encode(hcloud_image)}
sshkey: ${base64encode(hcloud_sshkey)}
externalCloudProvider:
enabled: true
manifests:

View File

@@ -1,13 +0,0 @@
data:
hosts: |
# static hosts
169.254.2.53 dns.local
# terraform
%{ for node in masters ~}
${format("%-24s",node.ipv4_address)} ${node.name}
${format("%-24s",node.ipv6_address)} ${node.name}
%{ endfor ~}
%{ for node in web ~}
${format("%-24s",node.ipv4_address)} ${node.name}
${format("%-24s",node.ipv6_address)} ${node.name}
%{ endfor ~}

View File

@@ -0,0 +1,63 @@
version: v1alpha1
debug: false
persist: true
machine:
type: worker
token: ${tokenMachine}
ca:
crt: ${caMachine}
certSANs: []
nodeLabels:
node.kubernetes.io/disktype: ssd
kubelet:
extraArgs:
cloud-provider: external
rotate-server-certificates: true
node-labels: "${labels}"
clusterDNS:
- 169.254.2.53
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
nodeIP:
validSubnets: ${format("%#v",split(",",nodeSubnets))}
network:
interfaces:
- interface: dummy0
addresses:
- 169.254.2.53/32
extraHostEntries:
- ip: ${lbv4}
aliases:
- ${apiDomain}
install:
wipe: false
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
systemDiskEncryption:
state:
provider: luks2
keys:
- nodeID: {}
slot: 0
ephemeral:
provider: luks2
keys:
- nodeID: {}
slot: 0
options:
- no_read_workqueue
- no_write_workqueue
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
discovery:
enabled: true
network:
dnsDomain: ${domain}
serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
token: ${token}
ca:
crt: ${ca}

View File

@@ -0,0 +1,64 @@
version: v1alpha1
debug: false
persist: true
machine:
type: worker
token: ${tokenMachine}
ca:
crt: ${caMachine}
certSANs: []
nodeLabels:
node.kubernetes.io/disktype: ssd
kubelet:
extraArgs:
cloud-provider: external
rotate-server-certificates: true
node-labels: "${labels}"
clusterDNS:
- 169.254.2.53
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
nodeIP:
validSubnets: ${format("%#v",split(",",nodeSubnets))}
network:
hostname: "${name}"
interfaces:
- interface: dummy0
addresses:
- 169.254.2.53/32
extraHostEntries:
- ip: ${lbv4}
aliases:
- ${apiDomain}
install:
wipe: false
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
systemDiskEncryption:
state:
provider: luks2
keys:
- nodeID: {}
slot: 0
ephemeral:
provider: luks2
keys:
- nodeID: {}
slot: 0
options:
- no_read_workqueue
- no_write_workqueue
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
discovery:
enabled: true
network:
dnsDomain: ${domain}
serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
token: ${token}
ca:
crt: ${ca}

View File

@@ -30,9 +30,8 @@ variable "kubernetes" {
default = {
podSubnets = "10.32.0.0/12,fd40:10:32::/102"
serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
nodeSubnets = "192.168.0.0/16"
domain = "cluster.local"
apiDomain = "api.cluster.local"
domain = "cluster.local"
clusterName = "talos-k8s-hetzner"
tokenMachine = ""
caMachine = ""
@@ -72,7 +71,7 @@ variable "controlplane" {
type = "cpx11",
},
"fsn1" = {
count = 1,
count = 0,
type = "cpx11",
},
"hel1" = {