update packages

Serge Logvinov
2022-11-21 18:33:35 +02:00
parent 2b937b7a31
commit 7e3be623a0
15 changed files with 336 additions and 188 deletions


@@ -37,3 +37,7 @@ az ad sp create-for-rbac --name "kubernetes-csi" --role kubernetes-csi --scopes=
* [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 0.5.0
* [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.19
* [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) 4.1.2
TODO:
* ipv6 route


@@ -3,3 +3,19 @@ provider "azurerm" {
features {}
subscription_id = local.subscription_id
}
# data "azurerm_virtual_machine_size" "size" {
# name = "Standard_D2pls_v5"
# location = "westeurope"
# }
# resource "azurerm_linux_virtual_machine_scale_set" "worker" {
# source_image_reference {
# location = "westeurope"
# publisher = "Canonical"
# offer = "0001-com-ubuntu-server-jammy"
# sku = "22_04-lts-${data.azurerm_virtual_machine_size.size.architecture == "Arm64" ? "arm64" : "gen2"}"
# version = "latest"
# }
# }
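The commented-out block above probes the VM size's CPU architecture so the Ubuntu SKU can switch between arm64 and gen2 images. A rough way to check the architecture outside of Terraform, assuming a logged-in Azure CLI (the JMESPath query is illustrative, not part of this repo):

# Print the CPU architecture (x64 or Arm64) reported for the size
az vm list-skus --location westeurope --size Standard_D2pls_v5 --all \
  --query "[0].capabilities[?name=='CpuArchitectureType'].value | [0]" --output tsv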


@@ -62,3 +62,10 @@
# tags = merge(var.tags, { os = "talos" })
# }
# data "azurerm_platform_image" "example" {
# location = "westeurope"
# publisher = "Canonical"
# offer = "0001-com-ubuntu-server-focal"
# sku = "20_04-lts-arm64"
# }


@@ -56,7 +56,7 @@ create-kubeconfig: ## Prepare kubeconfig
kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
create-deployments:
helm template --namespace=kube-system --version=1.11.6 -f deployments/cilium.yaml cilium \
helm template --namespace=kube-system --version=1.12.3 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.1.4 -f deployments/ingress.yaml ingress-nginx \
helm template --namespace=ingress-nginx --version=4.4.0 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
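The create-deployments target above re-renders the bundled manifests from the upstream charts at the new versions. A minimal sketch of the assumed prerequisite, namely that the cilium and ingress-nginx chart repositories are registered locally under the names the Makefile references:

# Register the chart repos behind cilium/cilium and ingress-nginx/ingress-nginx
helm repo add cilium https://helm.cilium.io/
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
make create-deployments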


@@ -49,10 +49,10 @@ data:
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
prometheus-serve-addr: ":9090"
# Port to expose Envoy metrics (e.g. "9095"). Envoy metrics listener will be disabled if this
prometheus-serve-addr: ":9962"
# Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
# field is not set.
proxy-prometheus-port: "9095"
proxy-prometheus-port: "9964"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
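Cilium 1.12 serves agent metrics on port 9962 and Envoy metrics on 9964 by default, which is why the serve address, the Prometheus annotations, and the hostPorts change throughout this rendered manifest. A quick way to confirm the new agent endpoint once the DaemonSet has rolled out (resource names come from the manifest; the local port choice is arbitrary):

# Agent metrics now answer on :9962 instead of :9090
kubectl -n kube-system port-forward ds/cilium 9962:9962 &
sleep 2
curl -s http://127.0.0.1:9962/metrics | head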
@@ -119,14 +119,14 @@ data:
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
cluster-id: ""
cluster-id: "0"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: vxlan
tunnel: "vxlan"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
@@ -139,20 +139,20 @@ data:
install-no-conntrack-iptables-rules: "false"
auto-direct-node-routes: "false"
enable-bandwidth-manager: "false"
enable-local-redirect-policy: "true"
enable-host-firewall: "true"
# List of devices used to attach bpf_host.o (implements BPF NodePort,
# host-firewall and BPF masquerading)
devices: "eth+"
kube-proxy-replacement: "strict"
kube-proxy-replacement: "strict"
kube-proxy-replacement-healthz-bind-address: ""
enable-host-reachable-services: "true"
bpf-lb-sock: "false"
host-reachable-services-protos:
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
enable-session-affinity: "true"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "true"
@@ -161,16 +161,30 @@ data:
enable-health-checking: "true"
enable-well-known-identities: "false"
enable-remote-node-identity: "true"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
ipam: "kubernetes"
disable-cnp-status-updates: "true"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
enable-k8s-endpoint-slice: "true"
enable-bgp-control-plane: "false"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/sys/fs/cgroup"
enable-k8s-terminating-endpoint: "true"
annotate-k8s-node: "true"
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-min-ttl: "3600"
tofqdns-proxy-response-max-delay: "100ms"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
@@ -207,24 +221,13 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
# To annotate the k8s node with Cilium's metadata
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
# Deprecated for removal in v1.10
- create
- list
- watch
- update
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then
# should be removed ideally.
@@ -232,21 +235,61 @@ rules:
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumegressnatpolicies
- ciliumendpointslices
- ciliumnetworkpolicies
- ciliumnodes
verbs:
- '*'
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
- ciliumendpoints
- ciliumnodes
verbs:
- create
- apiGroups:
- cilium.io
# To synchronize garbage collection of such resources
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
verbs:
- delete
- get
- apiGroups:
- cilium.io
resources:
- ciliumnodes
- ciliumnodes/status
verbs:
- get
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
verbs:
- patch
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -289,14 +332,6 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
@@ -304,14 +339,21 @@ rules:
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
@@ -320,26 +362,73 @@ rules:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumendpointslices
- ciliumidentities/status
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
verbs:
- '*'
# Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
- create
- update
- deletecollection
# To update the status of the CNPs and CCNPs
- patch
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
verbs:
# Update the auto-generated CNPs and CCNPs status.
- patch
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
- ciliumidentities
verbs:
# To perform garbage collection of such resources
- delete
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
# To synchronize garbage collection of such resources
- update
- apiGroups:
- cilium.io
resources:
- ciliumnodes
verbs:
- create
- update
- get
- list
- watch
# To perform CiliumNode garbage collection
- delete
- apiGroups:
- cilium.io
resources:
- ciliumnodes/status
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
verbs:
- create
- update
- get
- list
- watch
- delete
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -348,8 +437,28 @@ rules:
- create
- get
- list
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumbgploadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
@@ -401,7 +510,7 @@ metadata:
namespace: kube-system
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9095"
prometheus.io/port: "9964"
labels:
k8s-app: cilium
spec:
@@ -411,7 +520,7 @@ spec:
k8s-app: cilium
ports:
- name: envoy-metrics
port: 9095
port: 9964
protocol: TCP
targetPort: envoy-metrics
---
@@ -434,37 +543,14 @@ spec:
template:
metadata:
annotations:
prometheus.io/port: "9090"
prometheus.io/port: "9962"
prometheus.io/scrape: "true"
labels:
k8s-app: cilium
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- cilium
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c"
image: "quay.io/cilium/cilium:v1.12.3@sha256:30de50c4dc0a1e1077e9e7917a54d5cab253058b3f779822aec00f5c817ca826"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -544,6 +630,7 @@ spec:
- "/cni-install.sh"
- "--enable-debug=false"
- "--cni-exclusive=true"
- "--log-file=/var/run/cilium/cilium-cni.log"
preStop:
exec:
command:
@@ -556,16 +643,21 @@ spec:
cpu: 100m
memory: 128Mi
ports:
- name: peer-service
containerPort: 4244
hostPort: 4244
protocol: TCP
- name: prometheus
containerPort: 9090
hostPort: 9090
containerPort: 9962
hostPort: 9962
protocol: TCP
- name: envoy-metrics
containerPort: 9095
hostPort: 9095
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
@@ -591,10 +683,9 @@ spec:
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
hostNetwork: true
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c"
image: "quay.io/cilium/cilium:v1.12.3@sha256:30de50c4dc0a1e1077e9e7917a54d5cab253058b3f779822aec00f5c817ca826"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -615,6 +706,7 @@ spec:
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
privileged: true
volumeMounts:
@@ -629,12 +721,22 @@ spec:
resources:
requests:
cpu: 100m
memory: 100Mi
memory: 100Mi # wait-for-kube-proxy
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: "cilium"
serviceAccountName: "cilium"
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
@@ -713,21 +815,9 @@ spec:
io.cilium/app: operator
name: cilium-operator
spec:
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: io.cilium/app
operator: In
values:
- operator
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-operator
image: quay.io/cilium/operator-generic:v1.11.6@sha256:9f6063c7bcaede801a39315ec7c166309f6a6783e98665f6693939cf1701bc17
image: "quay.io/cilium/operator-generic:v1.12.3@sha256:816ec1da586139b595eeb31932c61a7c13b07fb4a0255341c0e0f18608e84eff"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -768,13 +858,27 @@ spec:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
terminationMessagePolicy: FallbackToLogsOnError
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
serviceAccount: "cilium-operator"
serviceAccountName: "cilium-operator"
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
tolerations:
- operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
# To read the configuration from the config map
- name: cilium-config-path


@@ -8,6 +8,11 @@ operator:
replicas: 1
prometheus:
enabled: false
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- operator: Exists
effect: NoSchedule
identityAllocationMode: crd
kubeProxyReplacement: strict
@@ -45,6 +50,11 @@ externalIPs:
enabled: true
hostFirewall:
enabled: true
ingressController:
enabled: false
securityContext:
privileged: true
hubble:
enabled: false
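With kubeProxyReplacement: strict and the host firewall enabled, it is worth confirming the upgraded agent actually runs in those modes. A small check, assuming the cilium DaemonSet and cilium-agent container names from the rendered cilium-result.yaml above:

# Both lines should report the features enabled by these values
kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium status | grep -E 'KubeProxyReplacement|Host firewall'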


@@ -105,9 +105,6 @@ spec:
serviceAccountName: coredns
enableServiceLinks: false
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
@@ -117,7 +114,7 @@ spec:
hostNetwork: true
containers:
- name: coredns
image: coredns/coredns:1.9.2
image: coredns/coredns:1.9.4
imagePullPolicy: IfNotPresent
resources:
limits:


@@ -138,18 +138,11 @@ spec:
prometheus.io/port: '8085'
spec:
serviceAccountName: cluster-autoscaler
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- name: cluster-autoscaler
# image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.1


@@ -4,10 +4,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -20,10 +20,10 @@ apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -66,10 +66,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
@@ -86,6 +86,13 @@ rules:
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
@@ -129,16 +136,24 @@ rules:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
@@ -156,10 +171,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -213,12 +228,17 @@ rules:
- get
- list
- watch
# TODO(Jintao Zhang)
# Once we release a new version of the controller,
# we will be able to remove the configmap related permissions
# We have used the Lease API for election
# ref: https://github.com/kubernetes/ingress-nginx/pull/8921
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
- ingress-nginx-leader
verbs:
- get
- update
@@ -228,6 +248,21 @@ rules:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
@@ -235,16 +270,24 @@ rules:
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -265,10 +308,10 @@ kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -302,10 +345,10 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -336,7 +379,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.2.1@sha256:5516d103a9c2ecc4f026efbd4b40662ce22dc1f824fb129ed121460aaa5c47f8"
image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -345,7 +388,7 @@ spec:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
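With the new --election-id, the controller takes leadership through a coordination.k8s.io Lease instead of the old ConfigMap, which is what the extra leases RBAC above is for. A quick way to verify the switch, assuming the ingress-nginx namespace used by the Makefile and that the Lease is named after the election id:

# The Lease should exist and name the current leader pod as holder
kubectl -n ingress-nginx get lease ingress-nginx-leader -o yaml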
@@ -428,10 +471,10 @@ apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller


@@ -59,15 +59,15 @@ spec:
labels:
app: local-path-provisioner
spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.19
image: rancher/local-path-provisioner:v0.0.23
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
@@ -117,40 +117,12 @@ data:
}
setup: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
mkdir -m 0777 -p ${absolutePath}
set -eu
mkdir -m 0777 -p "$VOL_DIR"
teardown: |-
#!/bin/sh
while getopts "m:s:p:" opt
do
case $opt in
p)
absolutePath=$OPTARG
;;
s)
sizeInBytes=$OPTARG
;;
m)
volMode=$OPTARG
;;
esac
done
rm -rf ${absolutePath}
set -eu
rm -rf "$VOL_DIR"
helperPod.yaml: |-
apiVersion: v1
kind: Pod

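The v0.0.23 helper scripts above take the volume path from the $VOL_DIR environment variable instead of parsing getopts flags. A small end-to-end check, assuming the provisioner ships its usual local-path storage class (the PVC name is only an example):

# Create a throwaway PVC; local-path-provisioner should provision it once a pod consumes it
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-path-test
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: local-path
  resources:
    requests:
      storage: 1Gi
EOF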

@@ -131,10 +131,6 @@ spec:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
containers:
@@ -144,7 +140,7 @@ spec:
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3

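After the metrics-server bump to v0.6.1, a simple smoke test confirms the aggregated metrics API still responds:

# Both commands go through the metrics.k8s.io API served by metrics-server
kubectl top nodes
kubectl top pods -A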

@@ -28,7 +28,6 @@ resource "hcloud_server" "controlplane" {
hcloud_image = data.hcloud_image.talos.id
robot_user = var.robot_user
robot_password = var.robot_password
labels = "topology.kubernetes.io/region=${element(var.regions, count.index)}"
})
)


@@ -10,11 +10,17 @@ machine:
- "${ipv4_local}"
- "${ipv4_vip}"
- "${apiDomain}"
features:
kubernetesTalosAPIAccess:
enabled: true
allowedRoles:
- os:reader
allowedKubernetesNamespaces:
- kube-system
kubelet:
extraArgs:
node-ip: "${ipv4_local}"
rotate-server-certificates: true
node-labels: "${labels}"
clusterDNS:
- 169.254.2.53
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
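Setting rotate-server-certificates: true makes every kubelet request serving certificates that have to be approved; this setup relies on the kubelet-serving-cert-approver manifest listed in the externalCloudProvider section below. A quick check that approvals are happening, assuming kubeconfig access to the cluster:

# Kubelet serving CSRs should move from Pending to Approved,Issued shortly after nodes join
kubectl get csr --sort-by=.metadata.creationTimestamp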
@@ -143,6 +149,7 @@ cluster:
externalCloudProvider:
enabled: true
manifests:
- https://raw.githubusercontent.com/siderolabs/talos-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/hcloud-cloud-controller-manager.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/hcloud-csi.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/kubelet-serving-cert-approver.yaml
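The kubernetesTalosAPIAccess feature added above is what lets the talos-cloud-controller-manager from this manifest list reach the Talos API from kube-system with the os:reader role. Re-applying a changed machine config is done with talosctl; a minimal sketch with placeholder values for the node address and file name:

# Push the regenerated controlplane config to a node (IP and file name are placeholders)
talosctl apply-config --nodes 172.16.0.10 --file controlplane.yaml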


@@ -42,7 +42,7 @@ build {
sources = ["source.hcloud.talos"]
provisioner "file" {
source = "../../../talos/_out/hcloud-amd64.raw.xz"
source = "hcloud-amd64.raw.xz"
destination = "/tmp/talos.raw.xz"
}
provisioner "shell" {

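The Packer template now expects hcloud-amd64.raw.xz next to it instead of a locally built image from ../../../talos/_out. A hedged sketch of fetching the matching release asset, assuming the usual Talos GitHub release URL layout and the v1.2.6 default pinned below:

# Download the Hetzner Cloud disk image for the pinned Talos release
curl -fLO https://github.com/siderolabs/talos/releases/download/v1.2.6/hcloud-amd64.raw.xz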

@@ -7,7 +7,7 @@ variable "hcloud_token" {
variable "talos_version" {
type = string
default = "v1.2.2"
default = "v1.2.6"
}
locals {