update talos

Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
Serge Logvinov
2022-12-20 14:35:03 +02:00
parent 2f9e98f3cd
commit 9e77102e15
20 changed files with 121 additions and 131 deletions

View File

@@ -53,7 +53,16 @@ create-kubeconfig: ## Prepare kubeconfig
kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
create-deployments:
helm template --namespace=kube-system --version=1.12.1 -f deployments/cilium.yaml cilium \
helm template --namespace=kube-system --version=1.12.4 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.2.4 -f deployments/ingress.yaml ingress-nginx \
helm template --namespace=ingress-nginx --version=4.4.0 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
deploy-csi:
dd if=/dev/urandom bs=1 count=16 2>/dev/null | hexdump -e '"%00x"' > scw-csi-secret.secret
kubectl --kubeconfig=kubeconfig create secret generic scw-csi-secret --from-file=encryptionPassphrase=scw-csi-secret.secret
kubectl --kubeconfig=kubeconfig apply -f deployments/scaleway-csi.yaml
kubectl --kubeconfig=kubeconfig apply -f deployments/scaleway-csi-node.yaml
kubectl --kubeconfig=kubeconfig apply -f deployments/scaleway-storage.yaml
rm -f scw-csi-secret.secret
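The deploy-csi target above generates a 16-byte random passphrase, stores it as the encryptionPassphrase key of a secret, applies the CSI manifests, and deletes the local copy. A minimal standalone sketch of the same flow — the openssl variant is an assumption, not what the Makefile uses, and the secret lands in kube-system because create-kubeconfig pins the context namespace there:

# openssl stands in for the Makefile's dd | hexdump pipeline (assumption)
openssl rand -hex 16 > scw-csi-secret.secret
kubectl --kubeconfig=kubeconfig create secret generic scw-csi-secret \
  --from-file=encryptionPassphrase=scw-csi-secret.secret
rm -f scw-csi-secret.secret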

View File

@@ -148,6 +148,7 @@ data:
kube-proxy-replacement: "strict"
kube-proxy-replacement-healthz-bind-address: ""
bpf-lb-sock: "false"
host-reachable-services-protos:
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
@@ -174,7 +175,6 @@ data:
bpf-root: "/sys/fs/bpf"
cgroup-root: "/sys/fs/cgroup"
enable-k8s-terminating-endpoint: "true"
annotate-k8s-node: "true"
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
@@ -221,13 +221,6 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
# To annotate the k8s node with Cilium's metadata
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -557,7 +550,7 @@ spec:
spec:
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b"
image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -664,6 +657,7 @@ spec:
protocol: TCP
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
@@ -691,7 +685,7 @@ spec:
mountPath: /run/xtables.lock
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b"
image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -712,6 +706,7 @@ spec:
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
privileged: true
volumeMounts:
@@ -817,14 +812,14 @@ spec:
metadata:
annotations:
# ensure pods roll when configmap updates
cilium.io/cilium-configmap-checksum: "10bcfd4171cc8219b04f7404f8c9add742e0de9272cd864272e80f23ec406384"
cilium.io/cilium-configmap-checksum: "c3ffdb3de5df1007b50c84e0af5ba77bc44d069f56d62d3232573a21084f2f80"
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- name: cilium-operator
image: quay.io/cilium/operator-generic:v1.12.1@sha256:93d5aaeda37d59e6c4325ff05030d7b48fabde6576478e3fdbfb9bb4a68ec4a1
image: "quay.io/cilium/operator-generic:v1.12.4@sha256:071089ec5bca1f556afb8e541d9972a0dfb09d1e25504ae642ced021ecbedbd1"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -865,6 +860,7 @@ spec:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
terminationMessagePolicy: FallbackToLogsOnError
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
@@ -881,8 +877,10 @@ spec:
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
# To read the configuration from the config map
- name: cilium-config-path
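Both Cilium images above are pinned by digest as well as tag. When regenerating the manifests, the digests can be cross-checked against the registry; crane is an assumed tool here, not something this repo ships:

# go install github.com/google/go-containerregistry/cmd/crane@latest
crane digest quay.io/cilium/cilium:v1.12.4
# expected: sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf
crane digest quay.io/cilium/operator-generic:v1.12.4
# expected: sha256:071089ec5bca1f556afb8e541d9972a0dfb09d1e25504ae642ced021ecbedbd1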

View File

@@ -9,6 +9,11 @@ operator:
replicas: 1
prometheus:
enabled: false
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- operator: Exists
effect: NoSchedule
identityAllocationMode: crd
kubeProxyReplacement: strict
@@ -20,12 +25,6 @@ autoDirectNodeRoutes: false
devices: [eth+]
healthChecking: true
annotateK8sNode: true
# l7Proxy: false
# encryption:
# enabled: true
# type: wireguard
cni:
install: true
@@ -42,6 +41,8 @@ ipv4:
enabled: true
ipv6:
enabled: true
hostServices:
enabled: true
hostPort:
enabled: true
nodePort:
@@ -50,6 +51,8 @@ externalIPs:
enabled: true
hostFirewall:
enabled: true
ingressController:
enabled: false
securityContext:
privileged: true
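These values pin the cilium-operator to control-plane nodes via the new nodeSelector plus a NoSchedule toleration. A quick placement check after deploying, using the label selector from the rendered manifest above:

kubectl --kubeconfig=kubeconfig -n kube-system get pods -l io.cilium/app=operator -o wide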

View File

@@ -105,9 +105,6 @@ spec:
serviceAccountName: coredns
enableServiceLinks: false
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
@@ -117,7 +114,7 @@ spec:
hostNetwork: true
containers:
- name: coredns
image: coredns/coredns:1.9.2
image: coredns/coredns:1.9.4
imagePullPolicy: IfNotPresent
resources:
limits:

View File

@@ -4,10 +4,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -20,10 +20,10 @@ apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -66,10 +66,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
@@ -136,16 +136,24 @@ rules:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
@@ -163,10 +171,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -230,7 +238,7 @@ rules:
resources:
- configmaps
resourceNames:
- ingress-controller-leader
- ingress-nginx-leader
verbs:
- get
- update
@@ -245,7 +253,7 @@ rules:
resources:
- leases
resourceNames:
- ingress-controller-leader
- ingress-nginx-leader
verbs:
- get
- update
@@ -262,16 +270,24 @@ rules:
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -292,10 +308,10 @@ kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -329,10 +345,10 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -363,7 +379,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.3.0@sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5"
image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -372,7 +388,7 @@ spec:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
@@ -455,10 +471,10 @@ apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.2.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.3.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
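Renaming --election-id from ingress-controller-leader to ingress-nginx-leader (the chart's new default) leaves the old leader-election objects orphaned. A hedged cleanup once the upgraded controller holds the new lease:

kubectl --kubeconfig=kubeconfig -n ingress-nginx delete lease ingress-controller-leader --ignore-not-found
kubectl --kubeconfig=kubeconfig -n ingress-nginx delete configmap ingress-controller-leader --ignore-not-found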

View File

@@ -59,15 +59,15 @@ spec:
labels:
app: local-path-provisioner
spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.19
image: rancher/local-path-provisioner:v0.0.23
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
@@ -117,40 +117,12 @@ data:
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
set -eu
mkdir -m 0777 -p "$VOL_DIR"
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
set -eu
rm -rf "$VOL_DIR"
helperPod.yaml: |-
apiVersion: v1
kind: Pod
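Newer local-path-provisioner releases hand the helper pod its parameters through environment variables, which is why the getopts parsing collapses to the quoted "$VOL_DIR" form above. A quick way to confirm the provisioner is running with the new scripts — the deployment name comes from this manifest, the namespace is an assumption:

kubectl --kubeconfig=kubeconfig -n local-path-storage logs deploy/local-path-provisioner --tail=20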

View File

@@ -131,19 +131,16 @@ spec:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
containers:
- args:
- --cert-dir=/tmp
- --secure-port=443
- --secure-port=6443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --authorization-always-allow-paths=/metrics
image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
@@ -155,7 +152,7 @@ spec:
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 443
- containerPort: 6443
name: https
protocol: TCP
readinessProbe:
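Moving the secure port from 443 to 6443 keeps metrics-server off the privileged port range, with containerPort changed in step. After rollout, the aggregated API should report Available; these are standard commands, nothing repo-specific:

kubectl --kubeconfig=kubeconfig get apiservice v1beta1.metrics.k8s.io
kubectl --kubeconfig=kubeconfig top nodes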

View File

@@ -131,23 +131,23 @@ spec:
serviceAccountName: cloud-controller-manager
nodeSelector:
node-role.kubernetes.io/control-plane: ""
node.cloudprovider.kubernetes.io/platform: scaleway
tolerations:
- key: "node.cloudprovider.kubernetes.io/uninitialized"
value: "true"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
containers:
- name: scaleway-cloud-controller-manager
image: ghcr.io/sergelogvinov/scaleway-cloud-controller-manager:1fa94b15f6d87e1f951331a7dca148302fe7318b
image: scaleway/scaleway-cloud-controller-manager:v0.21.6
# image: ghcr.io/sergelogvinov/scaleway-cloud-controller-manager:1fa94b15f6d87e1f951331a7dca148302fe7318b
imagePullPolicy: IfNotPresent
args:
- --cloud-provider=scaleway
- --leader-elect=true
- --allow-untagged-cloud
- --controllers=cloud-node,cloud-node-lifecycle
- --controllers=cloud-node-lifecycle
resources:
limits:
cpu: 500m
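Dropping cloud-node from --controllers hands node initialization to another controller (the Talos cloud-controller-manager added to the machine-config manifests later in this commit), leaving this CCM with cloud-node-lifecycle only. Whichever controller initializes nodes, each one should still end up with a providerID:

kubectl --kubeconfig=kubeconfig get nodes \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.providerID}{"\n"}{end}'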

View File

@@ -47,11 +47,13 @@ spec:
hostNetwork: true
dnsPolicy: Default
serviceAccount: scaleway-csi-node
priorityClassName: system-node-critical
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
node.cloudprovider.kubernetes.io/platform: scaleway
tolerations:
- operator: "Exists"
- effect: NoSchedule
operator: Exists
containers:
- name: scaleway-csi-plugin
image: scaleway/scaleway-csi:v0.1.7

View File

@@ -195,20 +195,15 @@ spec:
serviceAccount: scaleway-csi-controller
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
node.cloudprovider.kubernetes.io/platform: scaleway
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
effect: NoSchedule
containers:
- name: scaleway-csi-plugin
image: scaleway/scaleway-csi:v0.1.8
image: scaleway/scaleway-csi:v0.2.0
args:
- "--endpoint=$(CSI_ENDPOINT)"
- "--mode=controller"
@@ -248,7 +243,7 @@ spec:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
args:
- "--v=5"
- "--csi-address=$(CSI_ADDRESS)"
@@ -284,11 +279,9 @@ spec:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: liveness-probe
image: k8s.gcr.io/sig-storage/livenessprobe:v2.2.0
image: k8s.gcr.io/sig-storage/livenessprobe:v2.6.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi

View File

@@ -25,6 +25,11 @@ spec:
volumeMounts:
- name: persistent-storage
mountPath: /mnt/scaleway
securityContext:
seccompProfile:
type: RuntimeDefault
capabilities:
drop: ["ALL"]
updateStrategy:
type: RollingUpdate
selector:
@@ -38,4 +43,4 @@ spec:
resources:
requests:
storage: 10Gi
storageClassName: scw-bssd
storageClassName: scw-bssd-enc
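The test claim now asks for scw-bssd-enc, which ties back to the scw-csi-secret created by deploy-csi. A sketch of what such an encrypted class looks like, with parameters per the scaleway-csi docs — the exact class shipped in deployments/scaleway-storage.yaml may differ:

kubectl --kubeconfig=kubeconfig apply -f - <<'EOF'
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: scw-bssd-enc
provisioner: csi.scaleway.com
reclaimPolicy: Delete
allowVolumeExpansion: true
parameters:
  encrypted: "true"
  csi.storage.k8s.io/node-stage-secret-name: scw-csi-secret
  csi.storage.k8s.io/node-stage-secret-namespace: kube-system
EOF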

View File

@@ -30,7 +30,7 @@ resource "scaleway_instance_server" "controlplane" {
ipv4_local = cidrhost(local.main_subnet, 11 + count.index)
lbv4 = local.lbv4
ipv4 = scaleway_instance_ip.controlplane[count.index].address
labels = "${local.controlplane_labels},node.kubernetes.io/instance-type=${lookup(var.controlplane, "type", "DEV1-M")}"
labels = "node.kubernetes.io/instance-type=${lookup(var.controlplane, "type", "DEV1-M")}"
access = var.scaleway_access
secret = var.scaleway_secret
project_id = var.scaleway_project_id

View File

@@ -7,6 +7,13 @@ machine:
- "${lbv4}"
- "${ipv4}"
- "${apiDomain}"
features:
kubernetesTalosAPIAccess:
enabled: true
allowedRoles:
- os:reader
allowedKubernetesNamespaces:
- kube-system
kubelet:
extraArgs:
node-ip: "${ipv4_local}"
@@ -28,9 +35,6 @@ machine:
- interface: dummy0
addresses:
- 169.254.2.53/32
nameservers:
- 1.1.1.1
- 8.8.8.8
kubespan:
enabled: true
allowDownPeerBypass: true
@@ -65,11 +69,6 @@ cluster:
clusterName: ${clusterName}
discovery:
enabled: true
registries:
kubernetes:
disabled: false
service:
disabled: true
network:
dnsDomain: ${domain}
podSubnets: ${format("%#v",split(",",podSubnets))}
@@ -100,7 +99,8 @@ cluster:
namespaces:
- kube-system
- ingress-nginx
- local-path-provisioner
- monitoring
- local-path-storage
- local-lvm
runtimeClasses: []
usernames: []
@@ -134,6 +134,7 @@ cluster:
externalCloudProvider:
enabled: true
manifests:
- https://raw.githubusercontent.com/siderolabs/talos-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/scaleway-cloud-controller-manager.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/kubelet-serving-cert-approver.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/metrics-server.yaml
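The new kubernetesTalosAPIAccess feature is what lets the Talos cloud-controller-manager (first manifest in the list) reach the Talos API from kube-system with os:reader scope. After Terraform renders the template, the result can be sanity-checked before applying; the filename here is an assumption:

talosctl validate --config controlplane.yaml --mode cloud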

View File

@@ -7,6 +7,8 @@ machine:
ca:
crt: ${caMachine}
certSANs: []
nodeLabels:
node.kubernetes.io/disktype: ssd
kubelet:
extraArgs:
cloud-provider: external
@@ -71,11 +73,6 @@ cluster:
clusterName: ${clusterName}
discovery:
enabled: true
registries:
kubernetes:
disabled: false
service:
disabled: true
network:
dnsDomain: ${domain}
serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
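The new nodeLabels block surfaces as ordinary node labels once a worker joins, so the disktype label is immediately usable in selectors:

kubectl --kubeconfig=kubeconfig get nodes -l node.kubernetes.io/disktype=ssd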

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
scaleway = {
source = "scaleway/scaleway"
version = "~> 2.2.9"
version = "~> 2.8.0"
}
}
required_version = ">= 1.0"
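After bumping a provider constraint like this, the dependency lock file has to move with it; standard Terraform, nothing repo-specific:

terraform init -upgrade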