full setup

Serge Logvinov
2023-04-15 21:37:45 +03:00
parent 27ad8a011c
commit c79bb4e511
18 changed files with 499 additions and 325 deletions

View File

@@ -25,10 +25,10 @@ Having a single Kubernetes control plane that spans multiple cloud providers can
 | [Azure](azure) | 1.3.4 | CCM,CSI,Autoscaler | many regions, many zones | ✓ | ✓ |
 | [Exoscale](exoscale) | 1.3.0 | CCM,Autoscaler | many regions | ✗ | |
 | [GCP](gcp-zonal) | 1.3.4 | CCM,CSI,Autoscaler | one region, many zones | ✓ | ✓ |
-| [Hetzner](hetzner) | 1.3.4 | CCM,CSI,Autoscaler | many regions | ✗ | ✓ |
+| [Hetzner](hetzner) | 1.4.0 | CCM,CSI,Autoscaler | many regions, one network zone | ✗ | ✓ |
 | [Openstack](openstack) | 1.3.4 | CCM,CSI | many regions, many zones | ✓ | ✓ |
 | [Oracle](oracle) | 1.3.4 | CCM,~~CSI~~,Autoscaler | one region, many zones | ✓ | ✓ |
-| [Proxmox](proxmox) | 1.3.4 | TalosCCM | one region, one zones | ✓ | ✓ |
+| [Proxmox](proxmox) | 1.3.4 | CCM | one region, one zones | ✓ | ✓ |
 | [Scaleway](scaleway) | 1.3.4 | CCM,CSI | one region | ✓ | ✓ |
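The Hetzner entry's new "one network zone" note reflects a real constraint: a Hetzner private network only spans locations inside a single network zone (eu-central covers fsn1, nbg1 and hel1), so "many regions" here still means one zone. A quick way to see the mapping, assuming the `hcloud` CLI is configured:

```shell
# Locations and the network zone each belongs to
hcloud location list
# NAME   ...   NETWORK ZONE
# fsn1   ...   eu-central
# nbg1   ...   eu-central
# hel1   ...   eu-central
# ash    ...   us-east
```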
 ## Known issues

View File

@@ -1,14 +1,18 @@
-ENDPOINT:=api.cluster.local
-# ENDPOINT:=${shell terraform output -raw controlplane_endpoint 2>/dev/null}
-CLUSTERNAME:="talos-k8s-hetzner"
+CLUSTERNAME := "talos-k8s-hetzner"
+CPFIRST := ${shell terraform output -raw controlplane_firstnode 2>/dev/null}
+ENDPOINT := ${shell terraform output -raw controlplane_endpoint 2>/dev/null}
+ifneq (,$(findstring Warning,${ENDPOINT}))
+ENDPOINT := api.cluster.local
+endif
 
 help:
 	@awk 'BEGIN {FS = ":.*?## "} /^[0-9a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
 
 clean: ## Clean all
+	terraform destroy -auto-approve
 	rm -rf _cfgs
-	rm -f kubeconfig
+	rm -f kubeconfig terraform.tfvars.json
 
 prepare:
 	@[ -f ~/.ssh/terraform ] || ssh-keygen -f ~/.ssh/terraform -N '' -t rsa
@@ -23,10 +27,8 @@ create-config: ## Genereate talos configs
 	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
 
 create-templates:
-	@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
-	@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
+	@echo 'podSubnets: "10.32.0.0/12,fd40:10:32::/102"' > _cfgs/tfstate.vars
 	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
-	@echo 'nodeSubnets: "172.16.0.0/12"' >> _cfgs/tfstate.vars
 	@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars
@@ -40,8 +42,8 @@ create-templates:
 	@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json
 
 create-controlplane-bootstrap:
-	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
-	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 bootstrap
+	talosctl --talosconfig _cfgs/talosconfig config endpoint ${CPFIRST}
+	talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} bootstrap
 
 create-controlplane: ## Bootstrap first controlplane node
 	terraform apply -auto-approve -target=hcloud_server.controlplane
@@ -51,16 +53,10 @@ create-infrastructure: ## Bootstrap all nodes
 	terraform apply
 
 create-kubeconfig: ## Prepare kubeconfig
-	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig .
+	talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} kubeconfig .
 	kubectl --kubeconfig=kubeconfig config set clusters.${CLUSTERNAME}.server https://${ENDPOINT}:6443
 	kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
 
-create-deployments:
-	helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
-		cilium/cilium > deployments/cilium-result.yaml
-	helm template --namespace=ingress-nginx --version=4.4.0 -f deployments/ingress.yaml ingress-nginx \
-		ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
-
 create-secrets:
 	dd if=/dev/urandom bs=1 count=16 2>/dev/null | hexdump -e '"%00x"' > hcloud-csi-secret.secret
 	kubectl --kubeconfig=kubeconfig create secret generic hcloud-csi-secret --from-file=encryptionPassphrase=hcloud-csi-secret.secret
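The new `ENDPOINT` logic is worth spelling out: `terraform output -raw` prints a warning instead of a value while the state has no outputs yet, and the `findstring Warning` guard falls back to the static DNS name. A rough shell equivalent of what the Makefile header does (the exact warning wording varies by Terraform version):

```shell
# Mirror of the Makefile's ENDPOINT fallback, as a plain shell sketch
ENDPOINT=$(terraform output -raw controlplane_endpoint 2>/dev/null)
case "$ENDPOINT" in
  ''|*Warning*) ENDPOINT=api.cluster.local ;;  # no state yet: use the static name
esac
echo "endpoint: $ENDPOINT"
```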

View File

@@ -155,7 +155,8 @@ Cluster Autoscaler for [Hetzner Cloud](https://github.com/kubernetes/autoscaler/
 Create/deploy autoscaler:
 
 ```shell
-kubectl -n kube-system create secret generic hcloud-init --from-file=worker=_cfgs/worker-as.yaml.base64 --from-literal=ssh-key=${SSHID} --from-literal=image=${IMAGEID}
+cat _cfgs/worker-as.yaml | base64 > _cfgs/worker-as.yaml.base64
+kubectl -n kube-system create secret generic hcloud-init --from-file=worker=_cfgs/worker-as.yaml.base64 --from-literal=image="os=talos"
 kubectl apply -f deployments/hcloud-autoscaler.yaml
 ```
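One caveat on the new `base64` step: GNU coreutils `base64` wraps its output at 76 columns by default (macOS/BSD `base64` does not). If the consumer of the `worker` key expects a single-line payload, a non-wrapping variant is safer; this is an assumption about the decoder, not something the diff requires:

```shell
# Defensive variant: -w0 disables line wrapping (GNU coreutils)
base64 -w0 < _cfgs/worker-as.yaml > _cfgs/worker-as.yaml.base64
```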

View File

@@ -139,14 +139,14 @@ spec:
     spec:
       serviceAccountName: cluster-autoscaler
       nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
+        # node-role.kubernetes.io/control-plane: ""
         node.cloudprovider.kubernetes.io/platform: hcloud
       tolerations:
         - key: node-role.kubernetes.io/control-plane
           effect: NoSchedule
       containers:
         - name: cluster-autoscaler
-          image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.25.0
+          image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.2
           # image: ghcr.io/sergelogvinov/cluster-autoscaler-amd64:dev
           name: cluster-autoscaler
           resources:
@@ -167,7 +167,7 @@ spec:
             - --nodes=0:2:CPX31:NBG1:worker-nbg1
             - --nodes=0:2:CPX31:FSN1:worker-fsn1
             - --nodes=0:2:CPX31:HEL1:worker-hel1
-            - --v=1
+            - --v=2
           env:
             - name: HCLOUD_TOKEN
               valueFrom:
@@ -179,6 +179,11 @@ spec:
                 secretKeyRef:
                   name: hcloud
                   key: network
+            - name: HCLOUD_SSH_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: hcloud
+                  key: sshkey
             - name: HCLOUD_IMAGE
               valueFrom:
                 secretKeyRef:
@@ -189,8 +194,3 @@ spec:
                 secretKeyRef:
                   name: hcloud-init
                   key: worker
-            - name: HCLOUD_SSH_KEY
-              valueFrom:
-                secretKeyRef:
-                  name: hcloud-init
-                  key: ssh-key
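The env wiring now expects the SSH key id in the `hcloud` secret under `sshkey` (it previously lived in `hcloud-init` as `ssh-key`). A sketch of the secret shape this manifest reads; the literal values are placeholders for your project's token, network id and SSH key id:

```shell
kubectl -n kube-system create secret generic hcloud \
  --from-literal=token="$HCLOUD_TOKEN" \
  --from-literal=network="$HCLOUD_NETWORK_ID" \
  --from-literal=sshkey="$HCLOUD_SSH_KEY_ID"
```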

View File

@@ -1,15 +1,4 @@
 ---
-apiVersion: storage.k8s.io/v1
-kind: CSIDriver
-metadata:
-  name: csi.hetzner.cloud
-spec:
-  attachRequired: true
-  podInfoOnMount: true
-  volumeLifecycleModes:
-    - Persistent
-  fsGroupPolicy: File
----
 kind: StorageClass
 apiVersion: storage.k8s.io/v1
 metadata:
@@ -190,17 +179,46 @@ subjects:
     name: hcloud-csi-controller
     namespace: kube-system
 ---
-kind: StatefulSet
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: hcloud-csi-controller
+  name: hcloud-csi-controller-metrics
+  namespace: kube-system
+spec:
+  ports:
+    - name: metrics
+      port: 9189
+      targetPort: metrics
+  selector:
+    app: hcloud-csi-controller
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: hcloud-csi
+  name: hcloud-csi-node-metrics
+  namespace: kube-system
+spec:
+  ports:
+    - name: metrics
+      port: 9189
+      targetPort: metrics
+  selector:
+    app: hcloud-csi
+---
 apiVersion: apps/v1
+kind: Deployment
 metadata:
   name: hcloud-csi-controller
   namespace: kube-system
 spec:
+  replicas: 1
   selector:
     matchLabels:
       app: hcloud-csi-controller
-  serviceName: hcloud-csi-controller
-  replicas: 1
   template:
     metadata:
       labels:
@@ -212,102 +230,84 @@ spec:
       tolerations:
         - key: "node-role.kubernetes.io/control-plane"
           effect: NoSchedule
-      serviceAccount: hcloud-csi-controller
       containers:
-        - name: csi-attacher
-          image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /run/csi
-          securityContext:
-            privileged: true
-            capabilities:
-              add: ["SYS_ADMIN"]
-            allowPrivilegeEscalation: true
-        - name: csi-resizer
-          image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /run/csi
-          securityContext:
-            privileged: true
-            capabilities:
-              add: ["SYS_ADMIN"]
-            allowPrivilegeEscalation: true
-        - name: csi-provisioner
-          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
-          args:
-            - --feature-gates=Topology=true
-            - --default-fstype=ext4
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /run/csi
-          securityContext:
-            privileged: true
-            capabilities:
-              add: ["SYS_ADMIN"]
-            allowPrivilegeEscalation: true
-        - name: hcloud-csi-driver
-          image: hetznercloud/hcloud-csi-driver:2.0.0
-          imagePullPolicy: Always
-          command:
-            - /bin/hcloud-csi-driver-controller
-          env:
-            - name: CSI_ENDPOINT
-              value: unix:///run/csi/socket
-            - name: METRICS_ENDPOINT
-              value: 0.0.0.0:9189
-            - name: ENABLE_METRICS
-              value: "true"
-            - name: KUBE_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  apiVersion: v1
-                  fieldPath: spec.nodeName
-            - name: HCLOUD_TOKEN
-              valueFrom:
-                secretKeyRef:
-                  name: hcloud
-                  key: token
-          volumeMounts:
-            - name: socket-dir
-              mountPath: /run/csi
-          ports:
-            - containerPort: 9189
-              name: metrics
-            - name: healthz
-              containerPort: 9808
-              protocol: TCP
-          livenessProbe:
-            failureThreshold: 5
-            httpGet:
-              path: /healthz
-              port: healthz
-            initialDelaySeconds: 10
-            timeoutSeconds: 3
-            periodSeconds: 2
-          securityContext:
-            privileged: true
-            capabilities:
-              add: ["SYS_ADMIN"]
-            allowPrivilegeEscalation: true
-        - name: liveness-probe
-          imagePullPolicy: Always
-          image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
-          volumeMounts:
-            - mountPath: /run/csi
-              name: socket-dir
+        - args:
+            - --default-fstype=ext4
+          image: registry.k8s.io/sig-storage/csi-attacher:v4.1.0
+          name: csi-attacher
+          volumeMounts:
+            - mountPath: /run/csi
+              name: socket-dir
+        - image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0
+          name: csi-resizer
+          volumeMounts:
+            - mountPath: /run/csi
+              name: socket-dir
+        - args:
+            - --feature-gates=Topology=true
+            - --default-fstype=ext4
+          image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
+          name: csi-provisioner
+          volumeMounts:
+            - mountPath: /run/csi
+              name: socket-dir
+        - command:
+            - /bin/hcloud-csi-driver-controller
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///run/csi/socket
+            - name: METRICS_ENDPOINT
+              value: 0.0.0.0:9189
+            - name: ENABLE_METRICS
+              value: "true"
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: spec.nodeName
+            - name: HCLOUD_TOKEN
+              valueFrom:
+                secretKeyRef:
+                  key: token
+                  name: hcloud
+          image: hetznercloud/hcloud-csi-driver:2.2.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 10
+            periodSeconds: 2
+            timeoutSeconds: 3
+          name: hcloud-csi-driver
+          ports:
+            - containerPort: 9189
+              name: metrics
+            - containerPort: 9808
+              name: healthz
+              protocol: TCP
+          volumeMounts:
+            - mountPath: /run/csi
+              name: socket-dir
+        - image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
+          imagePullPolicy: Always
+          name: liveness-probe
+          volumeMounts:
+            - mountPath: /run/csi
+              name: socket-dir
+      serviceAccountName: hcloud-csi-controller
       volumes:
-        - name: socket-dir
-          emptyDir: {}
+        - emptyDir: {}
+          name: socket-dir
 ---
-kind: DaemonSet
 apiVersion: apps/v1
+kind: DaemonSet
 metadata:
-  name: hcloud-csi-node
-  namespace: kube-system
   labels:
     app: hcloud-csi
+  name: hcloud-csi-node
+  namespace: kube-system
 spec:
   selector:
     matchLabels:
@@ -323,110 +323,89 @@ spec:
         - effect: NoSchedule
           operator: Exists
       containers:
-        - name: csi-node-driver-registrar
-          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
-          args:
-            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket
-          env:
-            - name: KUBE_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  apiVersion: v1
-                  fieldPath: spec.nodeName
-          volumeMounts:
-            - name: plugin-dir
-              mountPath: /run/csi
-            - name: registration-dir
-              mountPath: /registration
-          securityContext:
-            privileged: true
-        - name: hcloud-csi-driver
-          image: hetznercloud/hcloud-csi-driver:2.1.0
-          imagePullPolicy: Always
-          command:
-            - /bin/hcloud-csi-driver-node
-          env:
-            - name: CSI_ENDPOINT
-              value: unix:///run/csi/socket
-            - name: METRICS_ENDPOINT
-              value: 0.0.0.0:9189
-            - name: ENABLE_METRICS
-              value: "true"
-          volumeMounts:
-            - name: kubelet-dir
-              mountPath: /var/lib/kubelet
-              mountPropagation: "Bidirectional"
-            - name: plugin-dir
-              mountPath: /run/csi
-            - name: device-dir
-              mountPath: /dev
-          securityContext:
-            privileged: true
-          ports:
-            - containerPort: 9189
-              name: metrics
-            - name: healthz
-              containerPort: 9808
-              protocol: TCP
-          livenessProbe:
-            failureThreshold: 5
-            httpGet:
-              path: /healthz
-              port: healthz
-            initialDelaySeconds: 10
-            timeoutSeconds: 3
-            periodSeconds: 2
-        - name: liveness-probe
-          imagePullPolicy: Always
-          image: k8s.gcr.io/sig-storage/livenessprobe:v2.3.0
-          volumeMounts:
-            - mountPath: /run/csi
-              name: plugin-dir
+        - args:
+            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.hetzner.cloud/socket
+          image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0
+          name: csi-node-driver-registrar
+          volumeMounts:
+            - mountPath: /run/csi
+              name: plugin-dir
+            - mountPath: /registration
+              name: registration-dir
+        - command:
+            - /bin/hcloud-csi-driver-node
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///run/csi/socket
+            - name: METRICS_ENDPOINT
+              value: 0.0.0.0:9189
+            - name: ENABLE_METRICS
+              value: "true"
+          image: hetznercloud/hcloud-csi-driver:2.2.0
+          imagePullPolicy: Always
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 10
+            periodSeconds: 2
+            timeoutSeconds: 3
+          name: hcloud-csi-driver
+          ports:
+            - containerPort: 9189
+              name: metrics
+            - containerPort: 9808
+              name: healthz
+              protocol: TCP
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - mountPath: /var/lib/kubelet
+              mountPropagation: Bidirectional
+              name: kubelet-dir
+            - mountPath: /run/csi
+              name: plugin-dir
+            - mountPath: /dev
+              name: device-dir
+        - image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
+          imagePullPolicy: Always
+          name: liveness-probe
+          volumeMounts:
+            - mountPath: /run/csi
+              name: plugin-dir
+      tolerations:
+        - effect: NoExecute
+          operator: Exists
+        - effect: NoSchedule
+          operator: Exists
+        - key: CriticalAddonsOnly
+          operator: Exists
       volumes:
-        - name: kubelet-dir
-          hostPath:
-            path: /var/lib/kubelet
-            type: Directory
-        - name: plugin-dir
-          hostPath:
-            path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
-            type: DirectoryOrCreate
-        - name: registration-dir
-          hostPath:
-            path: /var/lib/kubelet/plugins_registry/
-            type: Directory
-        - name: device-dir
-          hostPath:
-            path: /dev
-            type: Directory
+        - hostPath:
+            path: /var/lib/kubelet
+            type: Directory
+          name: kubelet-dir
+        - hostPath:
+            path: /var/lib/kubelet/plugins/csi.hetzner.cloud/
+            type: DirectoryOrCreate
+          name: plugin-dir
+        - hostPath:
+            path: /var/lib/kubelet/plugins_registry/
+            type: Directory
+          name: registration-dir
+        - hostPath:
+            path: /dev
+            type: Directory
+          name: device-dir
 ---
-apiVersion: v1
-kind: Service
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
 metadata:
-  name: hcloud-csi-controller-metrics
-  namespace: kube-system
-  labels:
-    app: hcloud-csi
+  name: csi.hetzner.cloud
 spec:
-  selector:
-    app: hcloud-csi-controller
-  ports:
-    - port: 9189
-      name: metrics
-      targetPort: metrics
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: hcloud-csi-node-metrics
-  namespace: kube-system
-  labels:
-    app: hcloud-csi
-spec:
-  selector:
-    app: hcloud-csi
-  ports:
-    - port: 9189
-      name: metrics
-      targetPort: metrics
+  attachRequired: true
+  fsGroupPolicy: File
+  podInfoOnMount: true
+  volumeLifecycleModes:
+    - Persistent
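A few sanity checks after applying the reworked manifests, using the object names visible in the diff (the StatefulSet is now a Deployment, the CSIDriver moved to the end of the file, and the metrics Services moved to the top):

```shell
kubectl -n kube-system get deploy/hcloud-csi-controller ds/hcloud-csi-node
kubectl -n kube-system get svc hcloud-csi-controller-metrics hcloud-csi-node-metrics
kubectl get csidriver csi.hetzner.cloud
```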

View File

@@ -21,13 +21,15 @@ spec:
     labels:
       run: overprovisioning
   spec:
+    # nodeSelector:
+    #   node.cloudprovider.kubernetes.io/platform: hcloud
     affinity:
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
           nodeSelectorTerms:
             - matchExpressions:
-                - key: node.kubernetes.io/instance-type
-                  operator: Exists
+                # - key: node.kubernetes.io/instance-type
+                #   operator: Exists
                 # - key: instance.hetzner.cloud/is-root-server
                 #   operator: NotIn
                 #   values:
@@ -50,3 +52,12 @@ spec:
         resources:
           requests:
             cpu: "700m"
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+              - ALL
+          runAsNonRoot: true
+          runAsUser: 1000
+          seccompProfile:
+            type: RuntimeDefault
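The added securityContext matches the `restricted` Pod Security Standard (no privilege escalation, all capabilities dropped, non-root UID, RuntimeDefault seccomp). One way to confirm nothing in the namespace would violate that profile, assuming the deployment runs in `default`:

```shell
# Server-side dry-run reports pods that would violate the restricted profile
kubectl label --dry-run=server --overwrite ns default \
  pod-security.kubernetes.io/enforce=restricted
```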

View File

@@ -1,11 +0,0 @@
-resource "local_file" "worker" {
-  content = templatefile("${path.module}/modules/templates/worker-as.yaml.tpl",
-    merge(var.kubernetes, {
-      lbv4   = local.ipv4_vip
-      labels = "project.io/node-pool=worker,hcloud/node-group=worker-as"
-    })
-  )
-  filename        = "_cfgs/worker-as.yaml"
-  file_permission = "0600"
-}

View File

@@ -15,10 +15,6 @@ locals {
   ]) : k.name => k }
 }
 
-output "instances" {
-  value = local.controlplanes
-}
-
 resource "hcloud_server" "controlplane" {
   for_each = local.controlplanes
   location = each.value.region
@@ -81,19 +77,25 @@ resource "local_file" "controlplane" {
   for_each = local.controlplanes
   content = templatefile("${path.module}/templates/controlplane.yaml.tpl",
-    merge(var.kubernetes, {
+    {
       name           = each.value.name
+      apiDomain      = var.kubernetes["apiDomain"]
+      domain         = var.kubernetes["domain"]
+      podSubnets     = var.kubernetes["podSubnets"]
+      serviceSubnets = var.kubernetes["serviceSubnets"]
       ipv4_vip       = local.ipv4_vip
       ipv4_local     = each.value.ip
       lbv4_local     = local.lbv4_local
       lbv4           = local.lbv4
       lbv6           = local.lbv6
+      nodeSubnets    = hcloud_network_subnet.core.ip_range
       hcloud_network = hcloud_network.main.id
       hcloud_token   = var.hcloud_token
       hcloud_image   = data.hcloud_image.talos["amd64"].id
+      hcloud_sshkey  = hcloud_ssh_key.infra.id
       robot_user     = var.robot_user
       robot_password = var.robot_password
-    })
+    }
   )
   filename        = "_cfgs/${each.value.name}.yaml"
   file_permission = "0600"

View File

@@ -1,22 +1,53 @@
-module "web" {
-  source = "./modules/worker"
-
-  for_each = var.instances
-  location = each.key
-  labels   = merge(var.tags, { label = "web" })
-  network  = hcloud_network.main.id
-  subnet   = hcloud_network_subnet.core.ip_range
-
-  vm_name           = "web-${each.key}-"
-  vm_items          = lookup(each.value, "web_count", 0)
-  vm_type           = lookup(each.value, "web_type", "cx11")
-  vm_image          = data.hcloud_image.talos.id
-  vm_ip_start       = (3 + try(index(var.regions, each.key), 0)) * 10
-  vm_security_group = [hcloud_firewall.web.id]
-
-  vm_params = merge(var.kubernetes, {
-    lbv4   = local.ipv4_vip
-    labels = "project.io/node-pool=web,hcloud/node-group=web-${each.key}"
-  })
-}
+locals {
+  web_prefix = "web"
+  web_labels = "project.io/node-pool=web"
+
+  web = { for k in flatten([
+    for regions in var.regions : [
+      for inx in range(lookup(try(var.instances[regions], {}), "web_count", 0)) : {
+        name : "${local.web_prefix}-${regions}-${1 + inx}"
+        image : data.hcloud_image.talos[startswith(lookup(try(var.instances[regions], {}), "web_type", "cpx11"), "ca") ? "arm64" : "amd64"].id
+        region : regions
+        type : lookup(try(var.instances[regions], {}), "web_type", "cpx11")
+        ip : cidrhost(hcloud_network_subnet.core.ip_range, 40 + 10 * index(var.regions, regions) + inx)
+      }
+    ]
+  ]) : k.name => k }
+}
+
+resource "hcloud_server" "web" {
+  for_each    = local.web
+  location    = each.value.region
+  name        = each.value.name
+  image       = each.value.image
+  server_type = each.value.type
+  ssh_keys    = [hcloud_ssh_key.infra.id]
+  keep_disk   = true
+  labels      = merge(var.tags, { label = "web" })
+
+  user_data = templatefile("${path.module}/templates/worker.yaml.tpl",
+    merge(var.kubernetes, {
+      name        = each.value.name
+      ipv4        = each.value.ip
+      lbv4        = local.ipv4_vip
+      nodeSubnets = hcloud_network_subnet.core.ip_range
+      labels      = "${local.web_labels},hcloud/node-group=web-${each.value.region}"
+    })
+  )
+
+  firewall_ids = [hcloud_firewall.web.id]
+
+  network {
+    network_id = hcloud_network.main.id
+    ip         = each.value.ip
+  }
+
+  lifecycle {
+    ignore_changes = [
+      image,
+      server_type,
+      user_data,
+      ssh_keys,
+    ]
+  }
+}
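The `cidrhost()` call gives each pool a predictable address block: web nodes start at host 40, stepping by 10 per region and by 1 per instance. A `terraform console` sketch, assuming the core subnet is `172.16.0.0/24` and two regions:

```shell
terraform console <<'EOF'
cidrhost("172.16.0.0/24", 40 + 10 * 0 + 0)
cidrhost("172.16.0.0/24", 40 + 10 * 1 + 0)
EOF
# "172.16.0.40"   first web node, first region
# "172.16.0.50"   first web node, second region
```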

View File

@@ -0,0 +1,13 @@
+resource "local_sensitive_file" "worker-as" {
+  content = templatefile("${path.module}/templates/worker-as.yaml.tpl",
+    merge(var.kubernetes, {
+      lbv4        = local.ipv4_vip
+      nodeSubnets = var.vpc_main_cidr
+      labels      = "project.io/node-pool=worker,hcloud/node-group=worker-as"
+    })
+  )
+  filename        = "_cfgs/worker-as.yaml"
+  file_permission = "0600"
+}

View File

@@ -0,0 +1,75 @@
+locals {
+  worker_prefix = "worker"
+  worker_labels = "project.io/node-pool=worker"
+
+  worker = { for k in flatten([
+    for regions in var.regions : [
+      for inx in range(lookup(try(var.instances[regions], {}), "worker_count", 0)) : {
+        name : "${local.worker_prefix}-${regions}-${1 + inx}"
+        image : data.hcloud_image.talos[startswith(lookup(try(var.instances[regions], {}), "worker_type", "cpx11"), "ca") ? "arm64" : "amd64"].id
+        region : regions
+        type : lookup(try(var.instances[regions], {}), "worker_type", "cpx11")
+        ip : cidrhost(hcloud_network_subnet.core.ip_range, 80 + 10 * index(var.regions, regions) + inx)
+      }
+    ]
+  ]) : k.name => k }
+}
+
+resource "hcloud_server" "worker" {
+  for_each    = local.worker
+  location    = each.value.region
+  name        = each.value.name
+  image       = each.value.image
+  server_type = each.value.type
+  ssh_keys    = [hcloud_ssh_key.infra.id]
+  keep_disk   = true
+  labels      = merge(var.tags, { label = "worker" })
+
+  user_data = templatefile("${path.module}/templates/worker.yaml.tpl",
+    merge(var.kubernetes, {
+      name        = each.value.name
+      ipv4        = each.value.ip
+      lbv4        = local.ipv4_vip
+      nodeSubnets = hcloud_network_subnet.core.ip_range
+      labels      = "${local.worker_labels},hcloud/node-group=worker-${each.value.region}"
+    })
+  )
+
+  firewall_ids = [hcloud_firewall.worker.id]
+
+  network {
+    network_id = hcloud_network.main.id
+    ip         = each.value.ip
+  }
+
+  lifecycle {
+    ignore_changes = [
+      image,
+      server_type,
+      user_data,
+      ssh_keys,
+    ]
+  }
+}
+
+# module "worker" {
+#   source = "./modules/worker"
+#
+#   for_each = var.instances
+#   location = each.key
+#   labels   = merge(var.tags, { label = "worker" })
+#   network  = hcloud_network.main.id
+#   subnet   = hcloud_network_subnet.core.ip_range
+#
+#   vm_name           = "worker-${each.key}-"
+#   vm_items          = lookup(each.value, "worker_count", 0)
+#   vm_type           = lookup(each.value, "worker_type", "cx11")
+#   vm_image          = data.hcloud_image.talos.id
+#   vm_ip_start       = (6 + try(index(var.regions, each.key), 0)) * 10
+#   vm_security_group = [hcloud_firewall.worker.id]
+#
+#   vm_params = merge(var.kubernetes, {
+#     lbv4   = local.ipv4_vip
+#     labels = "project.io/node-pool=worker,hcloud/node-group=worker-${each.key}"
+#   })
+# }
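The `lifecycle.ignore_changes` list keeps Terraform from replacing live workers when the Talos image, machine template or SSH keys change; only newly created nodes pick up the new values. A quick way to confirm after bumping the image (a sketch, using targeted planning):

```shell
# Existing workers should show no planned changes
terraform plan -target=hcloud_server.worker
```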

View File

@@ -1,22 +0,0 @@
-module "worker" {
-  source = "./modules/worker"
-
-  for_each = var.instances
-  location = each.key
-  labels   = merge(var.tags, { label = "worker" })
-  network  = hcloud_network.main.id
-  subnet   = hcloud_network_subnet.core.ip_range
-
-  vm_name           = "worker-${each.key}-"
-  vm_items          = lookup(each.value, "worker_count", 0)
-  vm_type           = lookup(each.value, "worker_type", "cx11")
-  vm_image          = data.hcloud_image.talos.id
-  vm_ip_start       = (6 + try(index(var.regions, each.key), 0)) * 10
-  vm_security_group = [hcloud_firewall.worker.id]
-
-  vm_params = merge(var.kubernetes, {
-    lbv4   = local.ipv4_vip
-    labels = "project.io/node-pool=worker,hcloud/node-group=worker-${each.key}"
-  })
-}

View File

@@ -7,20 +7,5 @@ output "controlplane_endpoint" {
 output "controlplane_firstnode" {
   description = "Kubernetes controlplane first node"
-  value       = try(hcloud_server.controlplane[0].ipv4_address, "none")
+  value       = try(flatten([for c in hcloud_server.controlplane : c.ipv4_address])[0], "127.0.0.1")
 }
-
-output "controlplane_nodes" {
-  description = "Kubernetes controlplane nodes"
-  value = [
-    for s in hcloud_server.controlplane[*] :
-    {
-      name         = s.name
-      ipv4_address = s.ipv4_address
-      ipv6_address = s.ipv6_address
-      zone         = "hetzner"
-      location     = s.location
-      params       = ""
-    }
-  ]
-}
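With the `for` expression the output no longer depends on list indexing (the resource is now keyed by name via `for_each`), and the Makefile's `CPFIRST` consumes it directly:

```shell
terraform output -raw controlplane_firstnode
# => public IPv4 of the first controlplane, or "127.0.0.1" before any exists
```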

View File

@@ -110,6 +110,7 @@ cluster:
             user: ${base64encode(robot_user)}
             password: ${base64encode(robot_password)}
             image: ${base64encode(hcloud_image)}
+            sshkey: ${base64encode(hcloud_sshkey)}
   externalCloudProvider:
     enabled: true
     manifests:
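The template base64-encodes the SSH key id next to the other robot credentials, presumably into the same inline secret the CCM and autoscaler read. Assuming it lands in the `hcloud` secret, a decode check looks like:

```shell
kubectl -n kube-system get secret hcloud -o jsonpath='{.data.sshkey}' | base64 -d
```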

View File

@@ -1,13 +0,0 @@
-data:
-  hosts: |
-    # static hosts
-    169.254.2.53 dns.local
-    # terraform
-%{ for node in masters ~}
-    ${format("%-24s",node.ipv4_address)} ${node.name}
-    ${format("%-24s",node.ipv6_address)} ${node.name}
-%{ endfor ~}
-%{ for node in web ~}
-    ${format("%-24s",node.ipv4_address)} ${node.name}
-    ${format("%-24s",node.ipv6_address)} ${node.name}
-%{ endfor ~}

View File

@@ -0,0 +1,63 @@
+version: v1alpha1
+debug: false
+persist: true
+machine:
+  type: worker
+  token: ${tokenMachine}
+  ca:
+    crt: ${caMachine}
+  certSANs: []
+  nodeLabels:
+    node.kubernetes.io/disktype: ssd
+  kubelet:
+    extraArgs:
+      cloud-provider: external
+      rotate-server-certificates: true
+      node-labels: "${labels}"
+    clusterDNS:
+      - 169.254.2.53
+      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
+    nodeIP:
+      validSubnets: ${format("%#v",split(",",nodeSubnets))}
+  network:
+    interfaces:
+      - interface: dummy0
+        addresses:
+          - 169.254.2.53/32
+    extraHostEntries:
+      - ip: ${lbv4}
+        aliases:
+          - ${apiDomain}
+  install:
+    wipe: false
+  sysctls:
+    net.core.somaxconn: 65535
+    net.core.netdev_max_backlog: 4096
+  systemDiskEncryption:
+    state:
+      provider: luks2
+      keys:
+        - nodeID: {}
+          slot: 0
+    ephemeral:
+      provider: luks2
+      keys:
+        - nodeID: {}
+          slot: 0
+      options:
+        - no_read_workqueue
+        - no_write_workqueue
+cluster:
+  id: ${clusterID}
+  secret: ${clusterSecret}
+  controlPlane:
+    endpoint: https://${apiDomain}:6443
+  clusterName: ${clusterName}
+  discovery:
+    enabled: true
+  network:
+    dnsDomain: ${domain}
+    serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
+  token: ${token}
+  ca:
+    crt: ${ca}
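The second `clusterDNS` entry is computed from the first service subnet; with the repository's default `serviceSubnets` it resolves to the conventional cluster DNS address:

```shell
terraform console <<'EOF'
cidrhost(split(",", "10.200.0.0/22,fd40:10:200::/112")[0], 10)
EOF
# "10.200.0.10"
```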

View File

@@ -0,0 +1,64 @@
+version: v1alpha1
+debug: false
+persist: true
+machine:
+  type: worker
+  token: ${tokenMachine}
+  ca:
+    crt: ${caMachine}
+  certSANs: []
+  nodeLabels:
+    node.kubernetes.io/disktype: ssd
+  kubelet:
+    extraArgs:
+      cloud-provider: external
+      rotate-server-certificates: true
+      node-labels: "${labels}"
+    clusterDNS:
+      - 169.254.2.53
+      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
+    nodeIP:
+      validSubnets: ${format("%#v",split(",",nodeSubnets))}
+  network:
+    hostname: "${name}"
+    interfaces:
+      - interface: dummy0
+        addresses:
+          - 169.254.2.53/32
+    extraHostEntries:
+      - ip: ${lbv4}
+        aliases:
+          - ${apiDomain}
+  install:
+    wipe: false
+  sysctls:
+    net.core.somaxconn: 65535
+    net.core.netdev_max_backlog: 4096
+  systemDiskEncryption:
+    state:
+      provider: luks2
+      keys:
+        - nodeID: {}
+          slot: 0
+    ephemeral:
+      provider: luks2
+      keys:
+        - nodeID: {}
+          slot: 0
+      options:
+        - no_read_workqueue
+        - no_write_workqueue
+cluster:
+  id: ${clusterID}
+  secret: ${clusterSecret}
+  controlPlane:
+    endpoint: https://${apiDomain}:6443
+  clusterName: ${clusterName}
+  discovery:
+    enabled: true
+  network:
+    dnsDomain: ${domain}
+    serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
+  token: ${token}
+  ca:
+    crt: ${ca}
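This static-worker template is identical to `worker-as.yaml.tpl` above except that it pins `hostname: "${name}"`; autoscaled nodes keep whatever hostname Hetzner assigns. A quick check against the repository's templates directory:

```shell
diff templates/worker-as.yaml.tpl templates/worker.yaml.tpl
# expect a single added line:
# >     hostname: "${name}"
```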

View File

@@ -30,9 +30,8 @@ variable "kubernetes" {
   default = {
     podSubnets     = "10.32.0.0/12,fd40:10:32::/102"
     serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
-    nodeSubnets    = "192.168.0.0/16"
-    domain         = "cluster.local"
     apiDomain      = "api.cluster.local"
+    domain         = "cluster.local"
     clusterName    = "talos-k8s-hetzner"
     tokenMachine   = ""
     caMachine      = ""
@@ -72,7 +71,7 @@ variable "controlplane" {
       type  = "cpx11",
     },
     "fsn1" = {
-      count = 1,
+      count = 0,
       type  = "cpx11",
     },
     "hel1" = {