upgrade proxmox setup

Serge Logvinov
2023-10-23 23:09:14 +03:00
parent 3555d01c17
commit aea43c1c5e
18 changed files with 282 additions and 396 deletions

View File

@@ -9,6 +9,7 @@ endif
 help:
 	@awk 'BEGIN {FS = ":.*?## "} /^[0-9a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
+.PHONY: init
 init: ## Initialize terraform
 	terraform init -upgrade
@@ -20,7 +21,6 @@ create-config: ## Genereate talos configs
 create-templates:
 	@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
 	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
-	@echo 'nodeSubnets: "172.16.0.0/12"' >> _cfgs/tfstate.vars
 	@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars
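With nodeSubnets no longer emitted, create-templates now writes a tfstate.vars along these lines (the last two values are read out of _cfgs/controlplane.yaml by yq; the ones shown here are the repo defaults, for illustration):

    podSubnets: "10.32.0.0/12,fd00:10:32::/102"
    serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"
    apiDomain: api.cluster.local
    domain: cluster.local
    clusterName: talos-k8s-proxmox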

View File

@@ -1,140 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
rules:
- apiGroups: [ "" ]
resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "endpoints", "persistentvolumes", "pods" ]
verbs: [ "*" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-storage
spec:
replicas: 1
selector:
matchLabels:
app: local-path-provisioner
template:
metadata:
labels:
app: local-path-provisioner
spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.23
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/var/data"]
}
]
}
setup: |-
#!/bin/sh
set -eu
mkdir -m 0777 -p "$VOL_DIR"
teardown: |-
#!/bin/sh
set -eu
rm -rf "$VOL_DIR"
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
priorityClassName: system-node-critical
tolerations:
- key: node.kubernetes.io/disk-pressure
operator: Exists
effect: NoSchedule
containers:
- name: helper-pod
image: busybox
imagePullPolicy: IfNotPresent

View File

@@ -1,197 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
containers:
- args:
- --cert-dir=/tmp
- --secure-port=6443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --authorization-always-allow-paths=/metrics
image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 6443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100

View File

@@ -0,0 +1,44 @@
apiVersion: v1
kind: Pod
metadata:
name: test
namespace: default
spec:
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
# nodeSelector:
# kubernetes.io/hostname: kube-11
containers:
- name: alpine
image: alpine
command: ["sleep","6000"]
volumeMounts:
- name: pvc
mountPath: /mnt
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
runAsNonRoot: true
terminationGracePeriodSeconds: 1
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsUser: 65534
volumes:
- name: pvc
ephemeral:
volumeClaimTemplate:
metadata:
labels:
type: pvc-volume
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: proxmox-zfs
resources:
requests:
storage: 5Gi
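A quick smoke test for this manifest (plain kubectl; generic ephemeral volumes name the PVC <pod>-<volume>, so it shows up as test-pvc):

    kubectl apply -f pod.yaml
    kubectl get pvc test-pvc          # bound via the proxmox-zfs StorageClass
    kubectl exec test -- df -h /mnt   # the 5Gi volume mounted in the container
    kubectl delete pod test           # the ephemeral PVC/PV are garbage-collected with the pod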

View File

@@ -0,0 +1,41 @@
controller:
plugin:
image:
pullPolicy: Always
tag: edge
node:
plugin:
image:
pullPolicy: Always
tag: edge
nodeSelector:
node.cloudprovider.kubernetes.io/platform: nocloud
tolerations:
- operator: Exists
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
config:
clusters:
- region: "dev-1"
token_id: "root@pam!terraform"
token_secret: "cb6e5561-ce10-4e7e-8b99-155ff6371a48"
url: "https://192.168.10.4:8006/api2/json"
insecure: true
storageClass:
- name: proxmox
storage: local-lvm
reclaimPolicy: Delete
fstype: xfs
- name: proxmox-zfs
storage: zfs
reclaimPolicy: Delete
fstype: xfs
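These values configure the Proxmox CSI plugin with a Proxmox API token and two StorageClasses. A sketch of installing them, assuming the chart is published at the usual OCI location for sergelogvinov/proxmox-csi-plugin and the values are saved as proxmox-csi.yaml:

    helm upgrade -i proxmox-csi-plugin \
      oci://ghcr.io/sergelogvinov/charts/proxmox-csi-plugin \
      -n csi-proxmox --create-namespace \
      -f proxmox-csi.yaml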

View File

@@ -0,0 +1,30 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pvc-test
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 10Gi
csi:
driver: csi.proxmox.sinextra.dev
fsType: xfs
volumeAttributes:
storage: zfs
volumeHandle: dev-1/pve-m-4/zfs/vm-9999-pvc-test
storageClassName: proxmox-zfs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: storage-test-0
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: proxmox-zfs
volumeName: pvc-test
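The volumeHandle of a pre-provisioned volume appears to follow a region/node/storage/disk layout, matching the region and storage names from the plugin values above:

    dev-1  / pve-m-4 / zfs     / vm-9999-pvc-test
    region   node      pool      Proxmox disk name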

View File

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: test
namespace: kube-system
labels:
app: alpine
spec:
podManagementPolicy: Parallel # default is OrderedReady
serviceName: test
replicas: 1
template:
metadata:
labels:
app: alpine
spec:
terminationGracePeriodSeconds: 3
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
nodeSelector:
# kubernetes.io/hostname: kube-21
# topology.kubernetes.io/zone: hvm-1
containers:
- name: alpine
image: alpine
command: ["sleep","1d"]
securityContext:
seccompProfile:
type: RuntimeDefault
capabilities:
drop: ["ALL"]
volumeMounts:
- name: storage
mountPath: /mnt
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: alpine
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 5Gi
storageClassName: proxmox-zfs

proxmox/images/.gitignore
View File

@@ -0,0 +1,2 @@
*.qcow2.xz
*.qcow2

View File

@@ -1,6 +1,18 @@
 #
+REGISTRY ?= ghcr.io/siderolabs
+TAG ?= 1.5.4
+
+clean:
+	rm -f nocloud-*.qcow2.xz
+	rm -f nocloud-*.qcow2
+
 init:
 	packer init -upgrade .
 release:
 	packer build -only=release.proxmox.talos .
+images: clean
+	docker run --rm -i -v /dev:/dev --privileged $(REGISTRY)/imager:v$(TAG) oracle \
+		--extra-kernel-arg talos.dashboard.disabled=1 --platform nocloud --arch amd64 --tar-to-stdout | tar xz
+	xz -d nocloud-amd64.qcow2.xz
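The images target builds a nocloud qcow2 with the Talos imager and leaves it in the working directory; turning it into a Proxmox clone source is left to the operator. A minimal sketch with stock qm commands (VM id 9000, the node name, and the storage are placeholders):

    make images
    scp nocloud-amd64.qcow2 root@pve-m-4:/tmp/
    ssh root@pve-m-4 'qm create 9000 --name talos --scsihw virtio-scsi-single'
    ssh root@pve-m-4 'qm importdisk 9000 /tmp/nocloud-amd64.qcow2 local-lvm'
    ssh root@pve-m-4 'qm template 9000'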

View File

@@ -50,12 +50,12 @@ resource "proxmox_vm_qemu" "controlplane" {
   target_node = each.value.node_name
   clone       = var.proxmox_image
   agent       = 0
   define_connection_info = false
   os_type = "ubuntu"
   qemu_os = "l26"
-  ipconfig0 = each.value.ip0
-  ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
+  # ipconfig1 = each.value.ip0
+  ipconfig0 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
   cicustom                = "meta=local:snippets/${each.value.name}.metadata.yaml"
   cloudinit_cdrom_storage = var.proxmox_storage
@@ -75,15 +75,15 @@ resource "proxmox_vm_qemu" "controlplane" {
     type = "socket"
   }
-  network {
-    model    = "virtio"
-    bridge   = "vmbr0"
-    firewall = true
-  }
   network {
     model  = "virtio"
-    bridge = "vmbr1"
+    bridge = "vmbr0"
+    # firewall = true
   }
+  # network {
+  #   model  = "virtio"
+  #   bridge = "vmbr1"
+  # }
   boot = "order=scsi0"
   disk {

View File

@@ -31,9 +31,10 @@ resource "null_resource" "web_machineconfig" {
   provisioner "file" {
     # source = "${path.module}/_cfgs/worker.yaml"
     content = templatefile("${path.module}/templates/web.yaml.tpl",
-      merge(var.kubernetes, {
+      merge(var.kubernetes, try(var.instances["all"], {}), {
         lbv4        = local.ipv4_vip
         nodeSubnets = var.vpc_main_cidr
+        clusterDns  = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
         labels      = local.web_labels
       }))
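clusterDns is now computed once in Terraform instead of inside each template. With the default serviceSubnets it resolves to the conventional tenth host of the first service CIDR, e.g. in terraform console:

    > cidrhost(split(",", "10.200.0.0/22,fd40:10:200::/112")[0], 10)
    "10.200.0.10"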
@@ -77,12 +78,12 @@ resource "proxmox_vm_qemu" "web" {
   target_node = each.value.node_name
   clone       = var.proxmox_image
   agent       = 0
   define_connection_info = false
   os_type = "ubuntu"
   qemu_os = "l26"
-  ipconfig0 = each.value.ip0
-  ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
+  # ipconfig0 = each.value.ip0
+  ipconfig0 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
   cicustom                = "user=local:snippets/${local.web_prefix}.yaml,meta=local:snippets/${each.value.name}.metadata.yaml"
   cloudinit_cdrom_storage = var.proxmox_storage
@@ -91,6 +92,7 @@ resource "proxmox_vm_qemu" "web" {
   sockets = 1
   cores   = each.value.cpu
   memory  = each.value.mem
+  numa    = true
   scsihw  = "virtio-scsi-single"
   vga {
@@ -107,10 +109,10 @@ resource "proxmox_vm_qemu" "web" {
     bridge   = "vmbr0"
     firewall = true
   }
-  network {
-    model  = "virtio"
-    bridge = "vmbr1"
-  }
+  # network {
+  #   model  = "virtio"
+  #   bridge = "vmbr1"
+  # }
   boot = "order=scsi0"
   disk {

View File

@@ -30,9 +30,10 @@ resource "null_resource" "worker_machineconfig" {
   provisioner "file" {
     content = templatefile("${path.module}/templates/worker.yaml.tpl",
-      merge(var.kubernetes, {
+      merge(var.kubernetes, try(var.instances["all"], {}), {
         lbv4        = local.ipv4_vip
         nodeSubnets = var.vpc_main_cidr
+        clusterDns  = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
         labels      = local.worker_labels
       }))
     destination = "/var/lib/vz/snippets/${local.worker_prefix}.yaml"
@@ -149,12 +150,12 @@ resource "proxmox_vm_qemu" "worker" {
   target_node = each.value.node_name
   clone       = var.proxmox_image
   agent       = 0
   define_connection_info = false
   os_type = "ubuntu"
   qemu_os = "l26"
-  ipconfig0 = each.value.ip0
-  ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
+  # ipconfig0 = each.value.ip0
+  ipconfig0 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
   cicustom                = "user=local:snippets/${local.worker_prefix}.yaml,meta=local:snippets/${each.value.name}.metadata.yaml"
   cloudinit_cdrom_storage = var.proxmox_storage
@@ -163,6 +164,7 @@ resource "proxmox_vm_qemu" "worker" {
   sockets = 1
   cores   = each.value.cpu
   memory  = each.value.mem
+  numa    = true
   scsihw  = "virtio-scsi-single"
   vga {
@@ -193,15 +195,6 @@ resource "proxmox_vm_qemu" "worker" {
     ssd     = 1
     backup  = false
   }
-  disk {
-    type     = "scsi"
-    storage  = var.proxmox_storage
-    size     = "128G"
-    cache    = "none"
-    iothread = 1
-    ssd      = 1
-    backup   = false
-  }
   lifecycle {
     ignore_changes = [

View File

@@ -1,6 +1,6 @@
 locals {
-  zones = [for k, v in var.instances : k]
+  zones = [for k, v in var.instances : k if k != "all"]
   controlplane_subnet = cidrsubnet(var.vpc_main_cidr, 5, var.network_shift)
   subnets = { for inx, zone in local.zones : zone => cidrsubnet(var.vpc_main_cidr, 5, var.network_shift + inx + 1) }
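The new if-clause keeps the shared "all" entry (see variables.tf below) out of the zone list, so no subnet or VM is derived for it. In terraform console, with the instance maps abbreviated:

    > [for k, v in { all = {}, node1 = {}, node2 = {} } : k if k != "all"]
    [
      "node1",
      "node2",
    ]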

View File

@@ -10,7 +10,7 @@ machine:
   network:
     hostname: "${name}"
     interfaces:
-      - interface: eth1
+      - interface: eth0
         vip:
           ip: ${ipv4_vip}
       - interface: dummy0
@@ -49,7 +49,7 @@ machine:
     - kube-system
 cluster:
   adminKubeconfig:
-    certLifetime: 8h0m0s
+    certLifetime: 48h0m0s
   controlPlane:
     endpoint: https://${apiDomain}:6443
   network:

View File

@@ -7,13 +7,15 @@ machine:
   ca:
     crt: ${caMachine}
   kubelet:
+    image: ghcr.io/siderolabs/kubelet:${version}
+    defaultRuntimeSeccompProfileEnabled: true
     extraArgs:
       cloud-provider: external
       rotate-server-certificates: true
-      node-labels: "${labels}"
+      node-labels: ${labels}
     clusterDNS:
       - 169.254.2.53
-      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
+      - ${clusterDns}
     nodeIP:
       validSubnets: ${format("%#v",split(",",nodeSubnets))}
   network:
@@ -25,23 +27,45 @@ machine:
       - ip: ${lbv4}
         aliases:
           - ${apiDomain}
+    nameservers:
+      - 2606:4700:4700::1111
+      - 1.1.1.1
+      - 2001:4860:4860::8888
+  time:
+    servers:
+      - 2.europe.pool.ntp.org
+      - time.cloudflare.com
   sysctls:
     net.core.somaxconn: 65535
     net.core.netdev_max_backlog: 4096
+    net.ipv4.tcp_keepalive_intvl: 60
+    net.ipv4.tcp_keepalive_time: 600
+    vm.max_map_count: 128000
+  install:
+    wipe: true
+    extraKernelArgs:
+      - talos.dashboard.disabled=1
   systemDiskEncryption:
     state:
      provider: luks2
+      options:
+        - no_read_workqueue
+        - no_write_workqueue
       keys:
         - nodeID: {}
           slot: 0
     ephemeral:
       provider: luks2
-      keys:
-        - nodeID: {}
-          slot: 0
       options:
         - no_read_workqueue
         - no_write_workqueue
+      keys:
+        - nodeID: {}
+          slot: 0
+  features:
+    rbac: true
+    stableHostname: true
+    apidCheckExtKeyUsage: true
 cluster:
   id: ${clusterID}
   secret: ${clusterSecret}
@@ -53,6 +77,8 @@ cluster:
   network:
     dnsDomain: ${domain}
     serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
+  proxy:
+    disabled: false
   token: ${token}
   ca:
     crt: ${ca}

View File

@@ -7,13 +7,15 @@ machine:
   ca:
     crt: ${caMachine}
   kubelet:
+    image: ghcr.io/siderolabs/kubelet:${version}
+    defaultRuntimeSeccompProfileEnabled: true
     extraArgs:
       cloud-provider: external
       rotate-server-certificates: true
-      node-labels: "${labels}"
+      node-labels: ${labels}
     clusterDNS:
       - 169.254.2.53
-      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
+      - ${clusterDns}
     nodeIP:
       validSubnets: ${format("%#v",split(",",nodeSubnets))}
   network:
@@ -25,27 +27,45 @@ machine:
       - ip: ${lbv4}
         aliases:
           - ${apiDomain}
+    nameservers:
+      - 2606:4700:4700::1111
+      - 1.1.1.1
+      - 2001:4860:4860::8888
+  time:
+    servers:
+      - 2.europe.pool.ntp.org
+      - time.cloudflare.com
   sysctls:
     net.core.somaxconn: 65535
     net.core.netdev_max_backlog: 4096
+    net.ipv4.tcp_keepalive_intvl: 60
+    net.ipv4.tcp_keepalive_time: 600
+    vm.max_map_count: 128000
+  install:
+    wipe: true
+    extraKernelArgs:
+      - talos.dashboard.disabled=1
   systemDiskEncryption:
     state:
       provider: luks2
+      options:
+        - no_read_workqueue
+        - no_write_workqueue
       keys:
         - nodeID: {}
           slot: 0
     ephemeral:
       provider: luks2
-      keys:
-        - nodeID: {}
-          slot: 0
       options:
         - no_read_workqueue
         - no_write_workqueue
-  disks:
-    - device: /dev/sdb
-      partitions:
-        - mountpoint: /var/data
+      keys:
+        - nodeID: {}
+          slot: 0
+  features:
+    rbac: true
+    stableHostname: true
+    apidCheckExtKeyUsage: true
 cluster:
   id: ${clusterID}
   secret: ${clusterSecret}
@@ -57,6 +77,8 @@ cluster:
   network:
     dnsDomain: ${domain}
     serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
+  proxy:
+    disabled: false
   token: ${token}
   ca:
     crt: ${ca}

View File

@@ -48,7 +48,6 @@ variable "kubernetes" {
   default = {
     podSubnets     = "10.32.0.0/12,fd40:10:32::/102"
    serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
-    nodeSubnets    = "192.168.0.0/16"
     domain         = "cluster.local"
     apiDomain      = "api.cluster.local"
     clusterName    = "talos-k8s-proxmox"
@@ -97,6 +96,9 @@ variable "instances" {
   description = "Map of VMs launched on proxmox hosts"
   type        = map(any)
   default = {
+    "all" = {
+      version = "v1.28.2"
+    },
     "node1" = {
       web_id    = 1000
       web_count = 0,
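The "all" entry is not a node: web.tf and worker.tf merge it into every machineconfig template via merge(var.kubernetes, try(var.instances["all"], {}), {...}), which is how version reaches the kubelet image tag, and locals.tf filters it out of the zones. merge() gives later arguments precedence, e.g. (values illustrative):

    > merge({ version = "v0.0.0" }, { version = "v1.28.2" })
    {
      "version" = "v1.28.2"
    }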

View File

@@ -6,7 +6,7 @@ terraform {
    }
    # proxmox = {
    #   source  = "bpg/proxmox"
-   #   version = "0.17.0-rc1"
+   #   version = "~> 0.35.1"
    # }
  }
  required_version = ">= 1.0"