Update Talos version

This commit is contained in:
Serge Logvinov
2022-07-12 19:39:13 +03:00
parent fe55ef6e08
commit bf709933a3
23 changed files with 713 additions and 159 deletions

View File

@@ -9,7 +9,7 @@ The goal is to create all cloud services from scratch.
|---|---|---|---|---|
| [Azure](azure) | 1.1.0 | CCM,CSI,Autoscaler | many regions, many zones | ✓ |
| [GCP](gcp-zonal) | 0.14.0 | CCM,CSI,Autoscaler | one region, many zones | ✓ |
| [Hetzner](hetzner) | 0.14.0 | CCM,CSI,Autoscaler | many regions | ✗ |
| [Hetzner](hetzner) | 1.1.0 | CCM,CSI,Autoscaler | many regions | ✗ |
| [Openstack](openstack) | 1.1.0 | CCM,CSI | many regions, many zones | ✓ |
| [Oracle](oracle) | 1.0.0 | | many regions, many zones | ✓ |
| [Scaleway](scaleway) | 1.1.0 | CCM,CSI | one region | ✓ |

View File

@@ -13,6 +13,7 @@ create-lb: ## Create load balancer
create-config: ## Generate Talos configs
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-hetzner https://${ENDPOINT}:6443
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
create-templates:
@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
@@ -22,6 +23,8 @@ create-templates:
@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.id' _cfgs/controlplane.yaml | awk '{ print "clusterID: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.secret' _cfgs/controlplane.yaml | awk '{ print "clusterSecret: "$$1}'>> _cfgs/tfstate.vars
@yq eval '.machine.token' _cfgs/controlplane.yaml | awk '{ print "tokenMachine: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.machine.ca.crt' _cfgs/controlplane.yaml | awk '{ print "caMachine: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.token' _cfgs/controlplane.yaml | awk '{ print "token: "$$1}' >> _cfgs/tfstate.vars
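Each of these recipe lines pipes one value out of the generated machine config into a flat `key: value` file that Terraform later consumes. As a sketch of what a single line does outside the Makefile (where `$$1` unescapes to `$1`), assuming `_cfgs/controlplane.yaml` has been generated:

```sh
# extract the cluster name from the generated Talos config (yq v4 syntax)
yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$1 }'
# prints something like:
#   clusterName: talos-k8s-hetzner
```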
@@ -37,7 +40,10 @@ create-infrastructure: ## Bootstrap all nodes
terraform apply
create-kubeconfig: ## Prepare kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig .
create-deployments:
helm template --namespace=kube-system --version=1.11.0 -f deployments/cilium.yaml cilium cilium/cilium > deployments/cilium_result.yaml
helm template --namespace=kube-system --version=1.11.6 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.1.4 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
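Both render targets assume the upstream chart repositories are already registered with Helm; if they are not, a one-time setup along these lines is needed first:

```sh
helm repo add cilium https://helm.cilium.io/
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```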

View File

@@ -33,6 +33,7 @@ data:
# setting it to "kvstore".
identity-allocation-mode: crd
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
# Disable the usage of CiliumEndpoint CRD
disable-endpoint-crd: "false"
@@ -141,14 +142,19 @@ data:
enable-bandwidth-manager: "false"
enable-local-redirect-policy: "true"
enable-host-firewall: "true"
# List of devices used to attach bpf_host.o (implements BPF NodePort,
# host-firewall and BPF masquerading)
devices: "eth+"
kube-proxy-replacement: "strict"
kube-proxy-replacement-healthz-bind-address: ""
enable-host-reachable-services: "true"
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
enable-session-affinity: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "true"
k8s-require-ipv6-pod-cidr: "true"
enable-endpoint-health-checking: "true"
@@ -161,6 +167,11 @@ data:
enable-k8s-endpoint-slice: "true"
cgroup-root: "/sys/fs/cgroup"
enable-k8s-terminating-endpoint: "true"
annotate-k8s-node: "true"
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -189,38 +200,19 @@ rules:
resources:
- namespaces
- services
- nodes
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- endpoints
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
# To annotate the k8s node with Cilium's metadata
- patch
- apiGroups:
- apiextensions.k8s.io
@@ -242,21 +234,15 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
- ciliumegressnatpolicies
- ciliumendpointslices
verbs:
@@ -271,14 +257,30 @@ rules:
- apiGroups:
- ""
resources:
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- pods
verbs:
- get
- list
- watch
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@@ -434,11 +436,6 @@ spec:
annotations:
prometheus.io/port: "9090"
prometheus.io/scrape: "true"
# This annotation plus the CriticalAddonsOnly toleration makes
# cilium to be a critical pod in the cluster, which ensures cilium
# gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: cilium
spec:
@@ -467,7 +464,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.11.0@sha256:ea677508010800214b0b5497055f38ed3bff57963fa2399bcb1c69cf9476453a"
image: "quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -477,7 +474,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -489,7 +486,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -502,7 +499,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -537,7 +534,7 @@ spec:
key: custom-cni-conf
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "172.16.0.10"
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
lifecycle:
@@ -552,6 +549,9 @@ spec:
command:
- /cni-uninstall.sh
resources:
limits:
cpu: 2
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
@@ -594,7 +594,7 @@ spec:
hostNetwork: true
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.11.0@sha256:ea677508010800214b0b5497055f38ed3bff57963fa2399bcb1c69cf9476453a"
image: "quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -612,7 +612,7 @@ spec:
key: clean-cilium-bpf-state
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "172.16.0.10"
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
securityContext:
@@ -727,7 +727,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-operator
image: quay.io/cilium/operator-generic:v1.11.0@sha256:b522279577d0d5f1ad7cadaacb7321d1b172d8ae8c8bc816e503c897b420cfe3
image: quay.io/cilium/operator-generic:v1.11.6@sha256:9f6063c7bcaede801a39315ec7c166309f6a6783e98665f6693939cf1701bc17
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -752,7 +752,7 @@ spec:
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "172.16.0.10"
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
livenessProbe:
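The probe port moves from 9876 to 9879 to track the agent's current health endpoint, and `KUBERNETES_SERVICE_HOST` now points at the `api.cluster.local` alias instead of a hard-coded VIP (the alias is pinned to the VIP through `extraHostEntries` in the Talos machine configs later in this commit). A quick post-rollout sanity check, assuming the default `cilium` DaemonSet name:

```sh
kubectl -n kube-system rollout status ds/cilium
# confirm the agent is healthy and kube-proxy replacement is active
kubectl -n kube-system exec ds/cilium -- cilium status | grep -E 'KubeProxyReplacement|Cilium:'
```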

View File

@@ -1,11 +1,8 @@
---
k8sServiceHost: "172.16.0.10"
k8sServiceHost: "api.cluster.local"
k8sServicePort: "6443"
agent:
enabled: true
operator:
enabled: true
replicas: 1
@@ -17,10 +14,11 @@ kubeProxyReplacement: strict
enableK8sEndpointSlice: true
localRedirectPolicy: true
healthChecking: true
tunnel: "vxlan"
autoDirectNodeRoutes: false
devices: [eth+]
healthChecking: true
cni:
install: true
@@ -38,11 +36,11 @@ ipv4:
ipv6:
enabled: true
hostServices:
enabled: false
enabled: true
hostPort:
enabled: true
nodePort:
enabled: false
enabled: true
externalIPs:
enabled: true
hostFirewall:
@@ -60,9 +58,9 @@ cgroup:
hostRoot: /sys/fs/cgroup
resources:
# limits:
# cpu: 4000m
# memory: 4Gi
limits:
cpu: 2
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
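These values feed the `create-deployments` Makefile target shown above; after editing them, the committed manifest should be re-rendered rather than patched by hand, then spot-checked:

```sh
make create-deployments
grep -E 'kube-proxy-replacement|enable-host-firewall' deployments/cilium-result.yaml
```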

View File

@@ -18,7 +18,6 @@ data:
hosts: |
# static hosts
169.254.2.53 dns.local
fd00::169:254:2:53 dns.local
Corefile.local: |
(empty) {
@@ -27,7 +26,7 @@ data:
.:53 {
errors
bind 169.254.2.53 fd00::169:254:2:53
bind 169.254.2.53
health 127.0.0.1:8091 {
lameduck 5s
@@ -39,7 +38,7 @@ data:
}
kubernetes cluster.local in-addr.arpa ip6.arpa {
endpoint https://172.16.0.10:6443
endpoint https://api.cluster.local:6443
kubeconfig /etc/coredns/kubeconfig.conf coredns
pods insecure
ttl 60
@@ -62,7 +61,7 @@ data:
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: https://172.16.0.10:6443
server: https://api.cluster.local:6443
name: default
contexts:
- context:
@@ -106,15 +105,19 @@ spec:
serviceAccountName: coredns
enableServiceLinks: false
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
effect: NoSchedule
value: "true"
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
hostNetwork: true
containers:
- name: coredns
image: coredns/coredns:1.8.6
image: coredns/coredns:1.9.2
imagePullPolicy: IfNotPresent
resources:
limits:
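The local forwarder now binds only the IPv4 link-local address, and both the `kubernetes` plugin endpoint and the kubeconfig server use the `api.cluster.local` alias. To poke the resolver from a node's network namespace, one option is a throwaway host-network pod (image and pod name here are illustrative):

```sh
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.35 \
  --overrides='{"spec":{"hostNetwork":true,"dnsPolicy":"ClusterFirstWithHostNet"}}' -- \
  nslookup dns.local 169.254.2.53
```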

View File

@@ -62,7 +62,7 @@ spec:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- image: hetznercloud/hcloud-cloud-controller-manager:v1.12.0
- image: hetznercloud/hcloud-cloud-controller-manager:v1.12.1
name: hcloud-cloud-controller-manager
command:
- "/bin/hcloud-cloud-controller-manager"

View File

@@ -12,10 +12,9 @@ spec:
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
namespace: kube-system
name: hcloud-volumes
annotations:
storageclass.kubernetes.io/is-default-class: "true"
storageclass.kubernetes.io/is-default-class: "false"
provisioner: csi.hetzner.cloud
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
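With `hcloud-volumes` no longer marked as the default StorageClass, PVCs without an explicit `storageClassName` fall through to whatever class (if any) is default. The flag can be flipped back at runtime without re-rendering the manifest:

```sh
kubectl annotate storageclass hcloud-volumes \
  storageclass.kubernetes.io/is-default-class=true --overwrite
```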

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
pod-security.kubernetes.io/enforce: baseline
pod-security.kubernetes.io/enforce-version: latest
pod-security.kubernetes.io/audit: baseline
pod-security.kubernetes.io/audit-version: latest
pod-security.kubernetes.io/warn: baseline
pod-security.kubernetes.io/warn-version: latest

View File

@@ -0,0 +1,440 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: "true"
client-body-timeout: "30"
client-header-timeout: "30"
enable-access-log-for-default-backend: "true"
error-log-level: "error"
hsts: "true"
hsts-include-subdomains: "true"
hsts-max-age: "31536000"
hsts-preload: "true"
http-redirect-code: "301"
limit-req-status-code: "429"
log-format-escape-json: "true"
log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
proxy-connect-timeout: "10"
proxy-headers-hash-bucket-size: "128"
proxy-hide-headers: "strict-transport-security"
proxy-read-timeout: "60"
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
proxy-send-timeout: "60"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
server-tokens: "false"
ssl-protocols: "TLSv1.3"
upstream-keepalive-connections: "32"
use-forwarded-headers: "true"
use-geoip: "false"
use-geoip2: "false"
use-gzip: "true"
worker-cpu-affinity: "auto"
worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: ClusterIP
clusterIP: None
ipFamilyPolicy: RequireDualStack
ipFamilies:
- IPv4
- IPv6
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 2
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 15
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.2.1@sha256:5516d103a9c2ecc4f026efbd4b40662ce22dc1f824fb129ed121460aaa5c47f8"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding are required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx
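Once applied, a quick check that the controller DaemonSet landed on the `web` pool and the ingress class is registered (resource names as rendered above):

```sh
kubectl -n ingress-nginx get ds,svc -o wide
kubectl get ingressclass nginx
```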

View File

@@ -0,0 +1,116 @@
controller:
kind: DaemonSet
hostNetwork: true
hostPort:
enabled: false
ports:
http: 80
https: 443
dnsPolicy: ClusterFirstWithHostNet
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
publishService:
enabled: false
config:
worker-processes: "auto"
worker-cpu-affinity: "auto"
error-log-level: "error"
server-tokens: "false"
http-redirect-code: "301"
use-gzip: "true"
use-geoip: "false"
use-geoip2: "false"
use-forwarded-headers: "true"
# curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
enable-access-log-for-default-backend: "true"
log-format-escape-json: "true"
log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'
upstream-keepalive-connections: "32"
proxy-connect-timeout: "10"
proxy-read-timeout: "60"
proxy-send-timeout: "60"
ssl-protocols: "TLSv1.3"
hsts: "true"
hsts-max-age: "31536000"
hsts-include-subdomains: "true"
hsts-preload: "true"
proxy-hide-headers: "strict-transport-security"
proxy-headers-hash-bucket-size: "128"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
limit-req-status-code: "429"
client-header-timeout: "30"
client-body-timeout: "30"
minReadySeconds: 15
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "10254"
extraEnvs:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 30
readinessProbe:
periodSeconds: 30
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
service:
enabled: true
type: ClusterIP
clusterIP: None
ipFamilyPolicy: "RequireDualStack"
ipFamilies:
- IPv4
- IPv6
admissionWebhooks:
enabled: false
metrics:
enabled: false
revisionHistoryLimit: 2
defaultBackend:
enabled: false
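This values file is the source for `deployments/ingress-result.yaml` above; keeping the two in sync is a matter of re-running the same render the Makefile uses and diffing:

```sh
helm template --namespace=ingress-nginx --version=4.1.4 -f deployments/ingress.yaml \
  ingress-nginx ingress-nginx/ingress-nginx | diff - deployments/ingress-result.yaml
```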

View File

@@ -176,9 +176,9 @@ spec:
tolerations:
- key: "node.cloudprovider.kubernetes.io/uninitialized"
value: "true"
effect: "NoSchedule"
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
operator: Exists
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
affinity:
@@ -200,7 +200,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
image: ghcr.io/alex1989hu/kubelet-serving-cert-approver:main
imagePullPolicy: Always
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
@@ -223,7 +223,7 @@ spec:
memory: 32Mi
requests:
cpu: 10m
memory: 12Mi
memory: 16Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -245,46 +245,6 @@ spec:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
spec:
allowPrivilegeEscalation: false
forbiddenSysctls:
- '*'
fsGroup:
ranges:
- max: 65534
min: 65534
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65534
min: 65534
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65534
min: 65534
rule: MustRunAs
volumes:
- downwardAPI
- secret
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
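The PodSecurityPolicy manifest is dropped here: the API is deprecated and removed in Kubernetes 1.25, and the repo's new namespaces use Pod Security admission labels instead (see `ingress-ns.yaml` above). To confirm the approver still does its job after the change:

```sh
# kubelet serving CSRs should show up as Approved,Issued shortly after kubelets start
kubectl get csr --sort-by=.metadata.creationTimestamp | grep kubelet-serving
```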

View File

@@ -60,6 +60,8 @@ spec:
app: local-path-provisioner
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
serviceAccountName: local-path-provisioner-service-account
@@ -106,12 +108,12 @@ metadata:
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/var/local-path-provisioner"]
}
]
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/var/local-path-provisioner"]
}
]
}
setup: |-
#!/bin/sh

View File

@@ -127,6 +127,9 @@ spec:
labels:
k8s-app: metrics-server
spec:
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/master: ""
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@@ -172,8 +175,6 @@ spec:
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
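The `nodeSelector` now pins metrics-server to control-plane nodes (label `node-role.kubernetes.io/master: ""`). A simple end-to-end check once it is scheduled:

```sh
kubectl -n kube-system get pods -l k8s-app=metrics-server -o wide
kubectl top nodes
```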

View File

@@ -18,7 +18,6 @@ resource "hcloud_server" "controlplane" {
user_data = templatefile("${path.module}/templates/controlplane.yaml",
merge(var.kubernetes, {
name = "master-${count.index + 1}"
type = "controlplane"
ipv4_vip = local.ipv4_vip
ipv4_local = cidrhost(hcloud_network_subnet.core.ip_range, 11 + count.index)
lbv4_local = local.lbv4_local

View File

@@ -10,13 +10,13 @@ module "web" {
vm_name = "web-${each.key}-"
vm_items = lookup(each.value, "web_count", 0)
vm_type = lookup(each.value, "web_instance_type", "cx11")
vm_type = lookup(each.value, "web_type", "cx11")
vm_image = data.hcloud_image.talos.id
vm_ip_start = (3 + index(var.regions, each.key)) * 10
vm_ip_start = (3 + try(index(var.regions, each.key), 0)) * 10
vm_security_group = [hcloud_firewall.web.id]
vm_params = merge(var.kubernetes, {
lbv4 = local.ipv4_vip
labels = "node.kubernetes.io/role=web,node.kubernetes.io/disktype=ssd,topology.kubernetes.io/region=${each.key}"
labels = "project.io/node-pool=web,node.kubernetes.io/disktype=ssd,topology.kubernetes.io/region=${each.key}"
})
}
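Two changes here: the lookup keys are renamed to match the new `variables.tf` shape (`web_type` instead of `web_instance_type`), and the IP offset is wrapped in `try()`. `index()` errors out when the element is missing, so `try(index(var.regions, each.key), 0)` converts a region absent from `var.regions` into the fallback `0` instead of failing the plan; the same pattern appears in the worker module below. This can be sanity-checked interactively (list and key below are illustrative):

```sh
echo '(3 + try(index(["nbg1", "fsn1"], "hel1"), 0)) * 10' | terraform console
# 30 — "hel1" is absent, so the try() fallback of 0 applies
```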

View File

@@ -10,13 +10,13 @@ module "worker" {
vm_name = "worker-${each.key}-"
vm_items = lookup(each.value, "worker_count", 0)
vm_type = lookup(each.value, "worker_instance_type", "cx11")
vm_type = lookup(each.value, "worker_type", "cx11")
vm_image = data.hcloud_image.talos.id
vm_ip_start = (6 + index(var.regions, each.key)) * 10
vm_ip_start = (6 + try(index(var.regions, each.key), 0)) * 10
vm_security_group = [hcloud_firewall.worker.id]
vm_params = merge(var.kubernetes, {
lbv4 = local.ipv4_vip
labels = "node.kubernetes.io/role=worker,node.kubernetes.io/disktype=ssd,topology.kubernetes.io/region=${each.key}"
labels = "project.io/node-pool=worker,node.kubernetes.io/disktype=ssd,topology.kubernetes.io/region=${each.key}"
})
}

View File

@@ -24,13 +24,13 @@ machine:
- interface: dummy0
addresses:
- 169.254.2.53/32
- fd00::169:254:2:53/128
extraHostEntries:
- ip: ${lbv4}
aliases:
- ${apiDomain}
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
net.ipv4.tcp_keepalive_time: 600
net.ipv4.tcp_keepalive_intvl: 60
fs.inotify.max_user_instances: 256
install:
wipe: false
systemDiskEncryption:
@@ -53,8 +53,10 @@ machine:
endpoints:
- https://registry-1.docker.io
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${lbv4}:6443
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
network:
dnsDomain: ${domain}

View File

@@ -2,7 +2,7 @@ version: v1alpha1
debug: false
persist: true
machine:
type: ${type}
type: controlplane
certSANs:
- "${lbv4}"
- "${lbv6}"
@@ -38,7 +38,13 @@ machine:
- interface: dummy0
addresses:
- 169.254.2.53/32
- fd00::169:254:2:53/128
kubespan:
enabled: false
allowDownPeerBypass: true
extraHostEntries:
- ip: ${ipv4_vip}
aliases:
- ${apiDomain}
install:
wipe: false
sysctls:
@@ -59,8 +65,16 @@ machine:
- no_read_workqueue
- no_write_workqueue
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${ipv4_vip}:6443
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
discovery:
enabled: true
registries:
service:
disabled: true
network:
dnsDomain: ${domain}
podSubnets: ${format("%#v",split(",",podSubnets))}
@@ -68,7 +82,7 @@ cluster:
cni:
name: custom
urls:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/cilium_result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/cilium-result.yaml
proxy:
disabled: true
mode: ipvs
@@ -85,7 +99,8 @@ cluster:
node-cidr-mask-size-ipv4: 24
node-cidr-mask-size-ipv6: 112
scheduler: {}
etcd: {}
etcd:
subnet: ${nodeSubnets}
inlineManifests:
- name: hcloud-secret
contents: |-
@@ -101,8 +116,11 @@ cluster:
externalCloudProvider:
enabled: true
manifests:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/hcloud-cloud-controller-manager.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/hcloud-csi.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/kubelet-serving-cert-approver.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/metrics-server.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/local-path-storage.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/ingress-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/ingress-result.yaml
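Cluster discovery is enabled while the external discovery service is disabled, so member tracking goes through the Kubernetes registry only. After bootstrap, membership can be inspected directly, reusing the node address from the Makefile targets above:

```sh
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 get members
```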

View File

@@ -2,7 +2,6 @@ data:
hosts: |
# static hosts
169.254.2.53 dns.local
fd00::169:254:2:53 dns.local
# terraform
%{ for node in masters ~}
${format("%-24s",node.ipv4_address)} ${node.name}

View File

@@ -54,22 +54,22 @@ variable "instances" {
type = map(any)
default = {
"nbg1" = {
web_count = 0,
web_instance_type = "cx11",
worker_count = 0,
worker_instance_type = "cx11",
web_count = 0,
web_type = "cx11",
worker_count = 0,
worker_type = "cx11",
},
"fsn1" = {
web_count = 0,
web_instance_type = "cx11",
worker_count = 0,
worker_instance_type = "cx11",
web_count = 0,
web_type = "cx11",
worker_count = 0,
worker_type = "cx11",
}
"hel1" = {
web_count = 0,
web_instance_type = "cx11",
worker_count = 0,
worker_instance_type = "cx11",
web_count = 0,
web_type = "cx11",
worker_count = 0,
worker_type = "cx11",
}
}
}

View File

@@ -5,5 +5,5 @@ terraform {
version = "~> 1.32.2"
}
}
required_version = ">= 1.0"
required_version = ">= 1.2"
}

View File

@@ -2,7 +2,7 @@
packer {
required_plugins {
hcloud = {
version = ">= 1.0.0"
version = ">= 1.0.4"
source = "github.com/hashicorp/hcloud"
}
}

View File

@@ -7,7 +7,7 @@ variable "hcloud_token" {
variable "talos_version" {
type = string
default = "v1.0.3"
default = "v1.2.0"
}
locals {
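With the default image bumped to Talos v1.2.0, rebuilding the Hetzner snapshot follows the usual Packer flow; the `hcloud_token` variable declared above this hunk is assumed to be available in the environment:

```sh
packer init .
packer build -var talos_version=v1.2.0 -var hcloud_token=$HCLOUD_TOKEN .
```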