Update Talos

Toboshii Nakama
2023-05-22 22:04:45 -05:00
parent 836144c74d
commit fd33defec9
7 changed files with 550 additions and 127 deletions

.taskfiles/TalosTasks.yml (new file)

@@ -0,0 +1,99 @@
---
version: "3"
vars:
DOMAIN: "dfw.56k.sh"
# renovate: datasource=github-releases depName=siderolabs/talos
TALOS_VERSION: "v1.4.4"
# renovate: datasource=github-releases depName=budimanjojo/talhelper
TALHELPER_VERSION: "v1.7.3"
# renovate: datasource=github-releases depName=siderolabs/kubelet
KUBERNETES_VERSION: "v1.26.5"
tasks:
init:
desc: Install talosctl and talhelper, generate configs, and grab kubeconfig (task talos:init)
cmds:
- mkdir config || true
- sudo curl -Lo /usr/local/bin/talosctl https://github.com/siderolabs/talos/releases/download/{{.TALOS_VERSION}}/talosctl-$(uname -s | tr "[:upper:]" "[:lower:]")-$(uname -m) && sudo chmod +x /usr/local/bin/talosctl
- curl -Lo /tmp/talhelper-{{.TALHELPER_VERSION}}.tar.gz https://github.com/budimanjojo/talhelper/releases/download/{{.TALHELPER_VERSION}}/talhelper_$(uname -s | tr "[:upper:]" "[:lower:]")_$(uname -m).tar.gz && tar -xzf /tmp/talhelper-{{.TALHELPER_VERSION}}.tar.gz -C /tmp && sudo mv /tmp/talhelper /usr/local/bin/talhelper
- task: generate
- talosctl --talosconfig=talos/clusterconfig/talosconfig kubeconfig --nodes k8s-control01 $KUBECONFIG
install:cni:
desc: Install CNI and cert approver helm releases (task talos:install:cni)
cmds:
- kubectl kustomize talos --enable-helm | kubectl apply -f -
upgrade:k8s:
dir: talos
desc: Upgrade Kubernetes to {{ .KUBERNETES_VERSION }} (task talos:upgrade:k8s)
cmds:
- talosctl --nodes k8s-control01 upgrade-k8s --to {{ .KUBERNETES_VERSION }}
upgrade:all:
desc: Upgrade all nodes to Talos version {{ .TALOS_VERSION }} (task talos:upgrade:all)
dir: talos
cmds:
- task: generate
# control-plane
- task: upgrade
vars: {NODE: k8s-control01}
- task: upgrade
vars: {NODE: k8s-control02}
- task: upgrade
vars: {NODE: k8s-control03}
# workers
- task: upgrade
vars: {NODE: k8s-worker01}
- task: upgrade
vars: {NODE: k8s-worker02}
- task: upgrade
vars: {NODE: k8s-worker03}
# Restart ingress-nginx; it sometimes has issues after the node reboots
- kubectl -n networking rollout restart deployment ingress-nginx-controller
generate:
internal: True
desc: Generate Talos machine configurations (task talos:generate)
dir: talos
cmds:
- talhelper genconfig
sources:
- talconfig.yaml
- talenv.sops.yaml
- talenv.yaml
- talsecret.sops.yaml
generates:
- clusterconfig/*.yaml
- clusterconfig/talosconfig
apply:
desc: Apply Talos config to a specific node (task talos:apply NODE=k8s-control01)
dir: talos
cmds:
- task: generate
- talosctl apply-config --nodes {{ .NODE }} --file clusterconfig/cluster01-{{ .NODE }}.{{ .DOMAIN }}.yaml {{ .CLI_ARGS }}
upgrade:
internal: True
desc: Upgrade a single node to Talos version {{ .TALOS_VERSION }} (task talos:upgrade NODE=k8s-control01)
dir: talos
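# Skip nodes that already report the target Talos version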
status:
- talosctl version --nodes {{ .NODE }} --short | grep 'Tag.*{{ .TALOS_VERSION }}'
cmds:
- task: wait_for_health
vars: {TIMEOUT: 10m}
- task: apply
vars: {NODE: "{{ .NODE }}", CLI_ARGS: "-m staged"}
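# The upgrade reboots the node, which also activates the staged config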
- talosctl upgrade --nodes {{ .NODE }} --image ghcr.io/siderolabs/installer:{{ .TALOS_VERSION }}
wait_for_health:
internal: True
desc: Wait for services in cluster to be healthy
cmds:
# Ensure CephCluster is healthy
- kubectl -n rook-ceph wait --for jsonpath='{.status.ceph.health}'='HEALTH_OK' --timeout {{ .TIMEOUT | default "30s" }} cephcluster rook-ceph
# Ensure CloudNative-PG cluster has 3 ready instances
- kubectl -n database wait --for jsonpath='{.status.readyInstances}'='3' --timeout {{ .TIMEOUT | default "30s" }} cluster postgres
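
Taken together, the taskfile gives the repo a single entry point for Talos lifecycle work. A minimal session, as a sketch assuming the repository root as the working directory and KUBECONFIG already exported for the init task to write to, might look like:

# One-time bootstrap: install talosctl and talhelper, render configs, fetch kubeconfig
task talos:init
# Push a rendered config to one node; flags after -- pass through as CLI_ARGS
task talos:apply NODE=k8s-control01 -- -m staged
# Health-gated rolling Talos upgrade across all six nodes
task talos:upgrade:all
# Then move Kubernetes itself to the pinned version
task talos:upgrade:k8s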


@@ -23,5 +23,6 @@
"editor.bracketPairColorization.enabled": true,
"editor.guides.bracketPairs":"active",
"editor.hover.delay": 1500,
"explorer.autoReveal": false
"explorer.autoReveal": false,
"ansible.python.interpreterPath": "/bin/python3"
}


@@ -16,4 +16,5 @@ includes:
precommit: .taskfiles/PreCommitTasks.yml
restore: .taskfiles/RestoreTasks.yml
rook: .taskfiles/RookTasks.yml
talos: .taskfiles/TalosTasks.yml
terraform: .taskfiles/TerraformTasks.yml
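
With the new include in place, Task prefixes every task from .taskfiles/TalosTasks.yml with talos:, which is what makes the (task talos:...) hints in the descriptions above resolve:

# List the newly namespaced tasks
task --list | grep 'talos:'
task talos:init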


@@ -21,6 +21,27 @@ metadata:
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: cilium
name: cilium-config-agent
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
@@ -28,6 +49,7 @@ metadata:
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: cilium
name: cilium
rules:
- apiGroups:
@@ -58,40 +80,71 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- list
- watch
- update
- get
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumloadbalancerippools
- ciliumbgppeeringpolicies
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumegressgatewaypolicies
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumegressnatpolicies
- ciliumendpointslices
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
verbs:
- '*'
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
- ciliumendpoints
- ciliumnodes
verbs:
- create
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
verbs:
- delete
- get
- apiGroups:
- cilium.io
resources:
- ciliumnodes
- ciliumnodes/status
verbs:
- get
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
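
Once the manifest is applied, the tightened agent RBAC can be sanity-checked with impersonation; the cilium ServiceAccount name comes from the bindings further down:

# Reads on the new LoadBalancer IP pool CRs should be allowed
kubectl auth can-i list ciliumloadbalancerippools.cilium.io \
  --as=system:serviceaccount:kube-system:cilium
# The old wildcard verbs are gone, so this should print "no"
kubectl auth can-i delete ciliumnetworkpolicies.cilium.io \
  --as=system:serviceaccount:kube-system:cilium
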
@@ -101,6 +154,7 @@ metadata:
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: cilium
name: cilium-operator
rules:
- apiGroups:
@@ -137,23 +191,23 @@ rules:
- apiGroups:
- ""
resources:
- services
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
- services
- endpoints
- namespaces
verbs:
- get
- list
@@ -162,26 +216,68 @@ rules:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumendpointslices
- ciliumidentities/status
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
verbs:
- '*'
- create
- update
- deletecollection
- patch
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
verbs:
- patch
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
- ciliumidentities
verbs:
- delete
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumnodes
verbs:
- create
- update
- get
- list
- watch
- delete
- apiGroups:
- cilium.io
resources:
- ciliumnodes/status
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
verbs:
- create
- update
- get
- list
- watch
- delete
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -190,8 +286,42 @@ rules:
- create
- get
- list
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resourceNames:
- ciliumloadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
resources:
- customresourcedefinitions
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools/status
verbs:
- patch
- apiGroups:
- coordination.k8s.io
resources:
@@ -202,6 +332,26 @@ rules:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: cilium
name: cilium-config-agent
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: cilium
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
@@ -209,6 +359,7 @@ metadata:
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: cilium
name: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -227,6 +378,7 @@ metadata:
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: cilium
name: cilium-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -240,25 +392,28 @@ subjects:
apiVersion: v1
data:
agent-not-ready-taint-key: node.cilium.io/agent-not-ready
annotate-k8s-node: "true"
arping-refresh-period: 30s
auto-direct-node-routes: "true"
bpf-lb-algorithm: maglev
bpf-lb-algorithm: random
bpf-lb-external-clusterip: "false"
bpf-lb-map-max: "65536"
bpf-lb-mode: dsr
bpf-lb-mode: snat
bpf-lb-sock: "false"
bpf-map-dynamic-size-ratio: "0.0025"
bpf-policy-map-max: "16384"
cgroup-root: /run/cilium/cgroupv2
bpf-root: /sys/fs/bpf
cgroup-root: /sys/fs/cgroup
cilium-endpoint-gc-interval: 5m0s
cluster-id: "1"
cluster-name: cluster01
cni-uninstall: "true"
custom-cni-conf: "false"
debug: "false"
debug-verbose: ""
disable-cnp-status-updates: "true"
disable-endpoint-crd: "false"
enable-auto-protect-node-port-range: "true"
enable-bandwidth-manager: "false"
enable-bgp-control-plane: "false"
enable-bpf-clock-probe: "true"
enable-endpoint-health-checking: "true"
enable-endpoint-routes: "true"
@@ -267,6 +422,7 @@ data:
enable-ipv4: "true"
enable-ipv4-masquerade: "true"
enable-ipv6: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
enable-k8s-terminating-endpoint: "true"
enable-l2-neigh-discovery: "true"
@@ -274,11 +430,14 @@ data:
enable-local-redirect-policy: "true"
enable-policy: default
enable-remote-node-identity: "true"
enable-session-affinity: "true"
enable-sctp: "false"
enable-svc-source-range-check: "true"
enable-vtep: "false"
enable-well-known-identities: "false"
enable-xt-socket-fallback: "true"
identity-allocation-mode: crd
install-iptables-rules: "true"
identity-gc-interval: 15m0s
identity-heartbeat-timeout: 30m0s
install-no-conntrack-iptables-rules: "false"
ipam: kubernetes
ipv4-native-routing-cidr: 172.22.0.0/16
@@ -291,11 +450,25 @@ data:
nodes-gc-interval: 5m0s
operator-api-serve-addr: 127.0.0.1:9234
preallocate-bpf-maps: "false"
procfs: /host/proc
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
sidecar-istio-proxy-image: cilium/istio_proxy
skip-cnp-status-startup-clean: "false"
synchronize-k8s-nodes: "true"
tofqdns-dns-reject-response-code: refused
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
tofqdns-idle-connection-grace-period: 0s
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-min-ttl: "3600"
tofqdns-proxy-response-max-delay: 100ms
tunnel: disabled
unmanaged-pod-watcher-interval: "15"
vtep-cidr: ""
vtep-endpoint: ""
vtep-mac: ""
vtep-mask: ""
kind: ConfigMap
metadata:
annotations:
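
The datapath defaults also change in this render: load balancing moves from maglev/dsr to random/snat, and the cgroup root moves to /sys/fs/cgroup. A sketch for confirming what the live ConfigMap carries, assuming the default cilium-config name referenced by the volumes below:

kubectl -n kube-system get configmap cilium-config \
  -o jsonpath='{.data.bpf-lb-algorithm} {.data.bpf-lb-mode} {.data.tunnel}{"\n"}'
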
@@ -314,6 +487,8 @@ metadata:
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
name: cilium-operator
@@ -333,11 +508,13 @@ spec:
template:
metadata:
annotations:
cilium.io/cilium-configmap-checksum: bfa52dc85e13bebac352d2556de22d1a0945f1e9a627897a6cf7361aeedf1559
cilium.io/cilium-configmap-checksum: 80543994a5c2c9292d8551389666721719e8474a42a3598e24b6c6c230710e63
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-operator
app.kubernetes.io/part-of: cilium
io.cilium/app: operator
name: cilium-operator
spec:
@@ -345,12 +522,11 @@ spec:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: io.cilium/app
operator: In
values:
- operator
matchLabels:
app.kubernetes.io/managed-by: Helm
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
@@ -378,7 +554,7 @@ spec:
value: 10.75.40.10
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: quay.io/cilium/operator-generic:v1.11.6@sha256:9f6063c7bcaede801a39315ec7c166309f6a6783e98665f6693939cf1701bc17
image: quay.io/cilium/operator-generic:v1.13.2@sha256:a1982c0a22297aaac3563e428c330e17668305a41865a842dec53d241c5490ab
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -390,11 +566,14 @@ spec:
periodSeconds: 10
timeoutSeconds: 3
name: cilium-operator
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
serviceAccount: cilium-operator
@@ -414,6 +593,8 @@ metadata:
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
name: cilium
namespace: kube-system
@@ -425,36 +606,25 @@ spec:
template:
metadata:
annotations:
cilium.io/cilium-configmap-checksum: bfa52dc85e13bebac352d2556de22d1a0945f1e9a627897a6cf7361aeedf1559
cilium.io/cilium-configmap-checksum: 80543994a5c2c9292d8551389666721719e8474a42a3598e24b6c6c230710e63
container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
meta.helm.sh/release-name: cilium
meta.helm.sh/release-namespace: kube-system
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
k8s-app: cilium
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- cilium
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
- --config-dir=/tmp/cilium/config-map
@@ -489,15 +659,16 @@ spec:
value: 10.75.40.10
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c
image: quay.io/cilium/cilium:v1.13.2@sha256:85708b11d45647c35b9288e0de0706d24a5ce8a378166cadc700f756cc1a38d6
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command:
- /cni-install.sh
- --enable-debug=false
- --cni-exclusive=true
- bash
- -c
- |
/cni-install.sh --enable-debug=false --cni-exclusive=true --log-file=/var/run/cilium/cilium-cni.log
preStop:
exec:
command:
@@ -530,7 +701,24 @@ spec:
successThreshold: 1
timeoutSeconds: 5
securityContext:
privileged: true
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
startupProbe:
failureThreshold: 105
httpGet:
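
With privileged: true replaced by an explicit capability set, the agent's effective privileges are now whatever this list grants. One way to inspect what actually landed on the DaemonSet (the container name cilium-agent matches the apparmor annotations in the template above):

kubectl -n kube-system get daemonset cilium -o \
  jsonpath='{.spec.template.spec.containers[?(@.name=="cilium-agent")].securityContext.capabilities.add}'
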
@@ -543,51 +731,74 @@ spec:
scheme: HTTP
periodSeconds: 2
successThreshold: 1
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
mountPropagation: HostToContainer
name: bpf-maps
- mountPath: /sys/fs/cgroup
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- mountPath: /host/opt/cni/bin
name: cni-path
- mountPath: /host/etc/cni/net.d
name: etc-cni-netd
- mountPath: /var/lib/cilium/clustermesh
name: clustermesh-secrets
readOnly: true
- mountPath: /tmp/cilium/config-map
name: cilium-config-path
readOnly: true
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /tmp
name: tmp
hostNetwork: true
initContainers:
- command:
- sh
- -ec
- |
cp /usr/bin/cilium-mount /hostbin/cilium-mount;
nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
rm /hostbin/cilium-mount
- cilium
- build-config
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
image: quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: 10.75.40.10
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: quay.io/cilium/cilium:v1.13.2@sha256:85708b11d45647c35b9288e0de0706d24a5ce8a378166cadc700f756cc1a38d6
imagePullPolicy: IfNotPresent
name: mount-cgroup
name: config
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /tmp
name: tmp
- args:
- mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
command:
- /bin/bash
- -c
- --
image: quay.io/cilium/cilium:v1.13.2@sha256:85708b11d45647c35b9288e0de0706d24a5ce8a378166cadc700f756cc1a38d6
imagePullPolicy: IfNotPresent
name: mount-bpf-fs
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
- mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
name: bpf-maps
- command:
- /init-container.sh
env:
@@ -607,7 +818,7 @@ spec:
value: 10.75.40.10
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c
image: quay.io/cilium/cilium:v1.13.2@sha256:85708b11d45647c35b9288e0de0706d24a5ce8a378166cadc700f756cc1a38d6
imagePullPolicy: IfNotPresent
name: clean-cilium-state
resources:
@@ -615,15 +826,47 @@ spec:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /sys/fs/bpf
name: bpf-maps
- mountPath: /run/cilium/cgroupv2
- mountPath: /sys/fs/cgroup
mountPropagation: HostToContainer
name: cilium-cgroup
- mountPath: /var/run/cilium
name: cilium-run
- command:
- /install-plugin.sh
image: quay.io/cilium/cilium:v1.13.2@sha256:85708b11d45647c35b9288e0de0706d24a5ce8a378166cadc700f756cc1a38d6
imagePullPolicy: IfNotPresent
name: install-cni-binaries
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
capabilities:
drop:
- ALL
seLinuxOptions:
level: s0
type: spc_t
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-path
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-node-critical
restartPolicy: Always
serviceAccount: cilium
@@ -632,6 +875,8 @@ spec:
tolerations:
- operator: Exists
volumes:
- emptyDir: {}
name: tmp
- hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
@@ -641,11 +886,7 @@ spec:
type: DirectoryOrCreate
name: bpf-maps
- hostPath:
path: /proc
type: Directory
name: hostproc
- hostPath:
path: /run/cilium/cgroupv2
path: /sys/fs/cgroup
type: DirectoryOrCreate
name: cilium-cgroup
- hostPath:
@@ -668,9 +909,14 @@ spec:
defaultMode: 256
optional: true
secretName: cilium-clustermesh
- configMap:
name: cilium-config
name: cilium-config-path
- hostPath:
path: /proc/sys/net
type: Directory
name: host-proc-sys-net
- hostPath:
path: /proc/sys/kernel
type: Directory
name: host-proc-sys-kernel
updateStrategy:
rollingUpdate:
maxUnavailable: 2
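
After the regenerated manifest is applied (or task talos:install:cni is re-run), a rollout check along these lines confirms the v1.13.2 images are live:

kubectl -n kube-system rollout status daemonset/cilium --timeout=5m
kubectl -n kube-system rollout status deployment/cilium-operator --timeout=5m
# Spot-check the running agent image
kubectl -n kube-system get daemonset cilium \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'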


@@ -4,7 +4,7 @@ kind: Kustomization
helmCharts:
- name: cilium
repo: https://helm.cilium.io/
version: 1.11.6
version: 1.13.2
releaseName: cilium
namespace: kube-system
valuesFile: values.yaml
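
The chart is rendered client-side by kustomize's helm support, so the version bump can be previewed before anything is applied; this mirrors the install:cni task above:

# Render locally without applying
kubectl kustomize talos --enable-helm | less
# Apply, as the taskfile does
kubectl kustomize talos --enable-helm | kubectl apply -f -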


@@ -10,8 +10,8 @@ ipv4NativeRoutingCIDR: 172.22.0.0/16
k8sServiceHost: 10.75.40.10
k8sServicePort: 6443
loadBalancer:
algorithm: "maglev"
mode: "dsr"
algorithm: random
mode: snat
tunnel: "disabled"
autoDirectNodeRoutes: true
endpointRoutes:
@@ -23,10 +23,85 @@ operator:
containerRuntime:
integration: containerd
securityContext:
privileged: true
privileged: false
capabilities:
# -- Capabilities for the `cilium-agent` container
ciliumAgent:
# Used to set socket permissions
- CHOWN
# Used to terminate envoy child process
- KILL
# Used since cilium modifies routing tables, etc...
- NET_ADMIN
# Used since cilium creates raw sockets, etc...
- NET_RAW
# Used since cilium monitor uses mmap
- IPC_LOCK
# Used in iptables. Consider removing once we are iptables-free
# - SYS_MODULE
# We need it for now but might not need it for >= 5.11, especially
# for the 'SYS_RESOURCE'.
# In >= 5.8 there's already BPF and PERFMON capabilities
- SYS_ADMIN
# Could be an alternative to SYS_ADMIN for the RLIMIT_NPROC
- SYS_RESOURCE
# Both PERFMON and BPF require kernel 5.8, container runtime
# cri-o >= v1.22.0 or containerd >= v1.5.0.
# If available, SYS_ADMIN can be removed.
#- PERFMON
#- BPF
# Allow overriding discretionary access control (e.g. required for package installation)
- DAC_OVERRIDE
# Allow setting Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation)
- FOWNER
# Allow executing a program that changes GID (e.g. required for package installation)
- SETGID
# Allow executing a program that changes UID (e.g. required for package installation)
- SETUID
# -- Capabilities for the `mount-cgroup` init container
mountCgroup:
# Only used to mount the cgroup filesystem
- SYS_ADMIN
# Used for nsenter
- SYS_CHROOT
- SYS_PTRACE
# -- Capabilities for the `apply-sysctl-overwrites` init container
applySysctlOverwrites:
# Required in order to access host's /etc/sysctl.d dir
- SYS_ADMIN
# Used for nsenter
- SYS_CHROOT
- SYS_PTRACE
# -- Capabilities for the `clean-cilium-state` init container
cleanCiliumState:
# Most of the capabilities here are the same ones used in the
# cilium-agent's container because this container can be used to
# uninstall all Cilium resources, and therefore it is likely
# that it will need the same capabilities.
# Used since cilium modifies routing tables, etc...
- NET_ADMIN
# Used in iptables. Consider removing once we are iptables-free
# - SYS_MODULE
# We need it for now but might not need it for >= 5.11, especially
# for the 'SYS_RESOURCE'.
# In >= 5.8 there's already BPF and PERFMON capabilities
- SYS_ADMIN
# Could be an alternative to SYS_ADMIN for the RLIMIT_NPROC
- SYS_RESOURCE
# Both PERFMON and BPF require kernel 5.8, container runtime
# cri-o >= v1.22.0 or containerd >= v1.5.0.
# If available, SYS_ADMIN can be removed.
#- PERFMON
#- BPF
hubble:
enabled: false
bgp:
enabled: false
announce:
loadbalancerIP: true
loadbalancerIP: true
podCIDR: false
cgroup:
autoMount:
enabled: false
hostRoot: /sys/fs/cgroup
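
With privileged set to false here, the only container in the rendered output that should still request privileged mode is the mount-bpf-fs init container; a grep over the render makes that easy to check:

kubectl kustomize talos --enable-helm | grep -B4 'privileged: true'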


@@ -1,7 +1,7 @@
---
clusterName: cluster01
talosVersion: v1.2.6
kubernetesVersion: v1.24.3
talosVersion: v1.3.7
kubernetesVersion: v1.26.5
endpoint: https://cluster01.${domainName}:6443
clusterPodNets:
- 172.22.0.0/16
@@ -72,6 +72,7 @@ controlPlane:
crt: ${k8sAggregatorCert}
key: ${k8sAggregatorCertKey}
apiServer:
disablePodSecurityPolicy: true
admissionControl: []
certSANs:
- ${clusterEndpointIP}
@@ -96,7 +97,7 @@ controlPlane:
extraArgs:
listen-metrics-urls: http://0.0.0.0:2381
extraManifests:
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/v0.5.1/deploy/ha-install.yaml
- https://raw.githubusercontent.com/alex1989hu/kubelet-serving-cert-approver/v0.6.10/deploy/ha-install.yaml
proxy:
disabled: true
extraArgs:
@@ -121,7 +122,7 @@ controlPlane:
[plugins."io.containerd.grpc.v1.cri"]
enable_unprivileged_ports = true
enable_unprivileged_icmp = true
path: /var/cri/conf.d/allow-unpriv-ports.toml
path: /etc/cri/conf.d/20-customization.part
op: create
install:
extraKernelArgs:
@@ -177,7 +178,7 @@ worker:
[plugins."io.containerd.grpc.v1.cri"]
enable_unprivileged_ports = true
enable_unprivileged_icmp = true
path: /var/cri/conf.d/allow-unpriv-ports.toml
path: /etc/cri/conf.d/20-customization.part
op: create
install:
extraKernelArgs:
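
These talconfig bumps flow through talhelper at generation time. The manual equivalent of the generate and apply tasks, as a sketch assuming the tools installed by task talos:init and decryptable sops env files:

cd talos
# Re-render per-node machine configs from talconfig.yaml and the sops-encrypted secrets
talhelper genconfig
# Stage the result on one node; it takes effect on the next reboot
talosctl apply-config --nodes k8s-control01 \
  --file clusterconfig/cluster01-k8s-control01.dfw.56k.sh.yaml -m staged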