Update version

Serge Logvinov
2022-06-23 11:45:12 +03:00
parent 77e992894c
commit 2cbc969728
17 changed files with 807 additions and 78 deletions

View File

@@ -7,6 +7,7 @@ help:
create-lb: ## Create load balancer
terraform init
terraform apply -auto-approve -target=scaleway_vpc_public_gateway_ip.main -target=output.controlplane_endpoint
terraform refresh
create-config: ## Generate talos configs
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-scaleway https://${ENDPOINT}:6443
@@ -39,4 +40,7 @@ create-kubeconfig: ## Prepare kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig .
create-deployments:
helm template --namespace=kube-system --version=1.11.1 -f deployments/cilium.yaml cilium cilium/cilium > deployments/cilium_result.yaml
helm template --namespace=kube-system --version=1.11.6 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.1.4 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
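The `create-deployments` target assumes both upstream Helm repositories are already registered locally. A minimal sketch of that one-time setup (the repo aliases match the chart references in the target above):

```shell
# One-time setup assumed by `make create-deployments`:
helm repo add cilium https://helm.cilium.io/
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```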

View File

@@ -23,7 +23,7 @@ Use [packer](../system_os/scaleway) to upload the Talos image.
Generate the default Talos config:
```shell
make create-config create-templates
make create-lb create-config create-templates
```
Open the config file **terraform.tfvars** and add the required params.
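A hypothetical **terraform.tfvars** sketch; the variable names are taken from the Terraform resources visible in this commit (`scaleway_*`, `controlplane`, `regions`), while the values are placeholders:

```shell
# Illustrative terraform.tfvars; replace the placeholder credentials.
cat > terraform.tfvars <<'EOF'
scaleway_access     = "SCWXXXXXXXXXXXXXXXXX"
scaleway_secret     = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
scaleway_project_id = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
regions             = ["fr-par-1"]
controlplane = {
  count = 1
  type  = "DEV1-M"
}
EOF
```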

View File

@@ -33,6 +33,7 @@ data:
# setting it to "kvstore".
identity-allocation-mode: crd
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
# Disable the usage of CiliumEndpoint CRD
disable-endpoint-crd: "false"
@@ -127,12 +128,11 @@ data:
# - geneve
tunnel: vxlan
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "false"
enable-l7-proxy: "true"
enable-ipv4-masquerade: "true"
enable-ipv6-masquerade: "true"
enable-bpf-masquerade: "false"
enable-wireguard: "true"
enable-xt-socket-fallback: "true"
install-iptables-rules: "true"
@@ -144,15 +144,17 @@ data:
enable-host-firewall: "true"
# List of devices used to attach bpf_host.o (implements BPF NodePort,
# host-firewall and BPF masquerading)
devices: "eth0 eth1"
devices: "eth+"
kube-proxy-replacement: "strict"
kube-proxy-replacement-healthz-bind-address: ""
enable-host-reachable-services: "true"
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
enable-session-affinity: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "true"
k8s-require-ipv6-pod-cidr: "true"
enable-endpoint-health-checking: "true"
@@ -165,6 +167,11 @@ data:
enable-k8s-endpoint-slice: "true"
cgroup-root: "/sys/fs/cgroup"
enable-k8s-terminating-endpoint: "true"
annotate-k8s-node: "true"
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -193,38 +200,19 @@ rules:
resources:
- namespaces
- services
- nodes
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- endpoints
- nodes
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
# To annotate the k8s node with Cilium's metadata
- patch
- apiGroups:
- apiextensions.k8s.io
@@ -246,21 +234,15 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
- ciliumegressnatpolicies
- ciliumendpointslices
verbs:
@@ -275,14 +257,30 @@ rules:
- apiGroups:
- ""
resources:
# to automatically delete [core|kube]dns pods so they start being
# managed by Cilium
- pods
verbs:
- get
- list
- watch
# to automatically delete [core|kube]dns pods so they start being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
@@ -438,11 +436,6 @@ spec:
annotations:
prometheus.io/port: "9090"
prometheus.io/scrape: "true"
# This annotation plus the CriticalAddonsOnly toleration makes
# cilium to be a critical pod in the cluster, which ensures cilium
# gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: cilium
spec:
@@ -471,7 +464,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.11.1@sha256:251ff274acf22fd2067b29a31e9fda94253d2961c061577203621583d7e85bd2"
image: "quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -481,7 +474,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -493,7 +486,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -506,7 +499,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -541,7 +534,7 @@ spec:
key: custom-cni-conf
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "172.16.0.5"
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
lifecycle:
@@ -556,6 +549,9 @@ spec:
command:
- /cni-uninstall.sh
resources:
limits:
cpu: 2
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
@@ -598,7 +594,7 @@ spec:
hostNetwork: true
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.11.1@sha256:251ff274acf22fd2067b29a31e9fda94253d2961c061577203621583d7e85bd2"
image: "quay.io/cilium/cilium:v1.11.6@sha256:f7f93c26739b6641a3fa3d76b1e1605b15989f25d06625260099e01c8243f54c"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -616,7 +612,7 @@ spec:
key: clean-cilium-bpf-state
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "172.16.0.5"
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
securityContext:
@@ -731,7 +727,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-operator
image: quay.io/cilium/operator-generic:v1.11.1@sha256:977240a4783c7be821e215ead515da3093a10f4a7baea9f803511a2c2b44a235
image: quay.io/cilium/operator-generic:v1.11.6@sha256:9f6063c7bcaede801a39315ec7c166309f6a6783e98665f6693939cf1701bc17
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -756,7 +752,7 @@ spec:
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "172.16.0.5"
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
livenessProbe:
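With `KUBERNETES_SERVICE_HOST` switched from a fixed IP to `api.cluster.local`, that name must resolve on every node before the agents can start (the Talos templates later in this commit add an `extraHostEntries` mapping that presumably covers it). A hedged sanity check once the DaemonSet is running, assuming `getent` is available in the agent image:

```shell
# Confirm the rendered env var and that the API name resolves inside an agent pod:
kubectl -n kube-system exec ds/cilium -c cilium-agent -- env | grep KUBERNETES_SERVICE
kubectl -n kube-system exec ds/cilium -c cilium-agent -- getent hosts api.cluster.local
```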

View File

@@ -1,11 +1,8 @@
---
k8sServiceHost: "172.16.0.5"
k8sServiceHost: "api.cluster.local"
k8sServicePort: "6443"
agent:
enabled: true
operator:
enabled: true
replicas: 1
@@ -17,16 +14,16 @@ kubeProxyReplacement: strict
enableK8sEndpointSlice: true
localRedirectPolicy: true
healthChecking: true
tunnel: "vxlan"
autoDirectNodeRoutes: false
devices: [eth0,eth1]
devices: [eth+]
l7Proxy: false
encryption:
enabled: true
type: wireguard
healthChecking: true
# l7Proxy: false
# encryption:
# enabled: true
# type: wireguard
cni:
install: true
@@ -44,11 +41,11 @@ ipv4:
ipv6:
enabled: true
hostServices:
enabled: false
enabled: true
hostPort:
enabled: true
nodePort:
enabled: false
enabled: true
externalIPs:
enabled: true
hostFirewall:
@@ -66,9 +63,9 @@ cgroup:
hostRoot: /sys/fs/cgroup
resources:
# limits:
# cpu: 4000m
# memory: 4Gi
limits:
cpu: 2
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
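After applying the regenerated manifest, a quick rollout and health check — a minimal sketch using the `cilium` binary that ships inside the agent container:

```shell
# Wait for the agents to roll out, then ask one for a brief status:
kubectl -n kube-system rollout status ds/cilium
kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium status --brief
```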

View File

@@ -0,0 +1,156 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns-local
namespace: kube-system
data:
empty.db: |
@ 60 IN SOA localnet. root.localnet. (
1 ; serial
60 ; refresh
60 ; retry
60 ; expiry
60 ) ; minimum
;
@ IN NS localnet.
hosts: |
# static hosts
169.254.2.53 dns.local
Corefile.local: |
(empty) {
file /etc/coredns/empty.db
}
.:53 {
errors
bind 169.254.2.53
health 127.0.0.1:8091 {
lameduck 5s
}
hosts /etc/coredns/hosts {
reload 60s
fallthrough
}
kubernetes cluster.local in-addr.arpa ip6.arpa {
endpoint https://api.cluster.local:6443
kubeconfig /etc/coredns/kubeconfig.conf coredns
pods insecure
ttl 60
}
prometheus :9153
forward . /etc/resolv.conf {
policy sequential
expire 30s
}
cache 300
loop
reload
loadbalance
}
kubeconfig.conf: |-
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: https://api.cluster.local:6443
name: default
contexts:
- context:
cluster: default
namespace: kube-system
user: coredns
name: coredns
current-context: coredns
users:
- name: coredns
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: coredns-local
namespace: kube-system
labels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
spec:
updateStrategy:
type: RollingUpdate
minReadySeconds: 15
selector:
matchLabels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
template:
metadata:
labels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9153"
spec:
priorityClassName: system-node-critical
serviceAccount: coredns
serviceAccountName: coredns
enableServiceLinks: false
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
hostNetwork: true
containers:
- name: coredns
image: coredns/coredns:1.9.2
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 50m
memory: 64Mi
args: [ "-conf", "/etc/coredns/Corefile.local" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 8091
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns-local
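The DaemonSet serves DNS on the link-local address `169.254.2.53` bound to each node's `dummy0` interface, so it is only reachable from the host network. A hedged verification via a one-off host-network debug pod (pod name and image are illustrative; busybox `nslookup` takes `NAME SERVER`):

```shell
kubectl run dns-check --rm -it --restart=Never --image=busybox:1.35 \
  --overrides='{"spec":{"hostNetwork":true,"dnsPolicy":"Default"}}' \
  -- nslookup kubernetes.default.svc.cluster.local 169.254.2.53
```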

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx

View File

@@ -0,0 +1,440 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: "true"
client-body-timeout: "30"
client-header-timeout: "30"
enable-access-log-for-default-backend: "true"
error-log-level: "error"
hsts: "true"
hsts-include-subdomains: "true"
hsts-max-age: "31536000"
hsts-preload: "true"
http-redirect-code: "301"
limit-req-status-code: "429"
log-format-escape-json: "true"
log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
proxy-connect-timeout: "10"
proxy-headers-hash-bucket-size: "128"
proxy-hide-headers: "strict-transport-security"
proxy-read-timeout: "60"
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
proxy-send-timeout: "60"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
server-tokens: "false"
ssl-protocols: "TLSv1.3"
upstream-keepalive-connections: "32"
use-forwarded-headers: "true"
use-geoip: "false"
use-geoip2: "false"
use-gzip: "true"
worker-cpu-affinity: "auto"
worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: ClusterIP
clusterIP: None
ipFamilyPolicy: RequireDualStack
ipFamilies:
- IPv4
- IPv6
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 2
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 15
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.2.1@sha256:5516d103a9c2ecc4f026efbd4b40662ce22dc1f824fb129ed121460aaa5c47f8"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding are required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.4
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx

View File

@@ -0,0 +1,116 @@
controller:
kind: DaemonSet
hostNetwork: true
hostPort:
enabled: false
ports:
http: 80
https: 443
dnsPolicy: ClusterFirstWithHostNet
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
publishService:
enabled: false
config:
worker-processes: "auto"
worker-cpu-affinity: "auto"
error-log-level: "error"
server-tokens: "false"
http-redirect-code: "301"
use-gzip: "true"
use-geoip: "false"
use-geoip2: "false"
use-forwarded-headers: "true"
# curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
enable-access-log-for-default-backend: "true"
log-format-escape-json: "true"
log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'
upstream-keepalive-connections: "32"
proxy-connect-timeout: "10"
proxy-read-timeout: "60"
proxy-send-timeout: "60"
ssl-protocols: "TLSv1.3"
hsts: "true"
hsts-max-age: "31536000"
hsts-include-subdomains: "true"
hsts-preload: "true"
proxy-hide-headers: "strict-transport-security"
proxy-headers-hash-bucket-size: "128"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
limit-req-status-code: "429"
client-header-timeout: "30"
client-body-timeout: "30"
minReadySeconds: 15
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "10254"
extraEnvs:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 30
readinessProbe:
periodSeconds: 30
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
service:
enabled: true
type: ClusterIP
clusterIP: None
ipFamilyPolicy: "RequireDualStack"
ipFamilies:
- IPv4
- IPv6
admissionWebhooks:
enabled: false
metrics:
enabled: false
revisionHistoryLimit: 2
defaultBackend:
enabled: false
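Because the service is a headless ClusterIP and the controller runs on the host network, traffic terminates directly on ports 80/443 of the `web`-pool nodes. A hedged check after deployment (NODE_IP is a placeholder for one of those nodes; a 404 from nginx for an unknown host is the expected answer):

```shell
kubectl -n ingress-nginx rollout status ds/ingress-nginx-controller
curl -sH 'Host: test.example.com' http://NODE_IP/ -o /dev/null -w '%{http_code}\n'
```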

View File

@@ -135,6 +135,8 @@ spec:
effect: "NoSchedule"
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
containers:
- name: scaleway-cloud-controller-manager
image: scaleway/scaleway-cloud-controller-manager:v0.21.4

View File

@@ -3,6 +3,10 @@ resource "scaleway_instance_ip" "controlplane" {
count = lookup(var.controlplane, "count", 0)
}
locals {
controlplane_labels = "topology.kubernetes.io/region=fr-par,topology.kubernetes.io/zone=${var.regions[0]}"
}
resource "scaleway_instance_server" "controlplane" {
count = lookup(var.controlplane, "count", 0)
name = "master-${count.index + 1}"
@@ -27,7 +31,7 @@ resource "scaleway_instance_server" "controlplane" {
ipv4_local = cidrhost(local.main_subnet, 11 + count.index)
lbv4 = local.lbv4
ipv4 = scaleway_instance_ip.controlplane[count.index].address
labels = "topology.kubernetes.io/region=fr-par"
labels = "${local.controlplane_labels},node.kubernetes.io/instance-type=${lookup(var.controlplane, "type", "DEV1-M")}"
access = var.scaleway_access
secret = var.scaleway_secret
project_id = var.scaleway_project_id
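The expanded `labels` string is handed to the machine templates, so the kubelet registers the nodes with region, zone, and instance-type labels. Once a node joins, a quick check (column flags only, no cluster changes):

```shell
# Show the labels set by the locals block above as columns:
kubectl get nodes -L topology.kubernetes.io/region,topology.kubernetes.io/zone,node.kubernetes.io/instance-type
```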

View File

@@ -22,7 +22,7 @@ resource "scaleway_vpc_public_gateway_dhcp" "main" {
lifecycle {
ignore_changes = [
dns_server_override
dns_servers_override
]
}
}

View File

@@ -1,7 +1,7 @@
output "controlplane_endpoint" {
description = "Kubernetes controlplane endpoint"
value = local.lbv4
value = try(local.lbv4, "127.0.0.1")
}
output "controlplane_firstnode" {

View File

@@ -29,13 +29,16 @@ machine:
- interface: dummy0
addresses:
- 169.254.2.53/32
- fd00::169:254:2:53/128
nameservers:
- 1.1.1.1
- 8.8.8.8
kubespan:
enabled: false
allowDownPeerBypass: true
extraHostEntries:
- ip: ${ipv4_vip}
aliases:
- ${apiDomain}
install:
wipe: false
sysctls:
@@ -56,8 +59,11 @@ machine:
- no_read_workqueue
- no_write_workqueue
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${ipv4_vip}:6443
clusterName: ${clusterName}
discovery:
enabled: true
registries:
@@ -70,7 +76,7 @@ cluster:
cni:
name: custom
urls:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/cilium_result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/cilium-result.yaml
proxy:
disabled: true
apiServer:
@@ -82,7 +88,8 @@ cluster:
node-cidr-mask-size-ipv4: 24
node-cidr-mask-size-ipv6: 112
scheduler: {}
etcd: {}
etcd:
subnet: ${nodeSubnets}
inlineManifests:
- name: scaleway-secret
contents: |-
@@ -103,3 +110,6 @@ cluster:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/kubelet-serving-cert-approver.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/metrics-server.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/local-path-storage.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/ingress-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/ingress-result.yaml
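This template is rendered by Terraform before being applied; a hedged validation step on the rendered output (the file path is assumed — it mirrors the `_cfgs` directory used by the Makefile, and `cloud` mode matches a Scaleway install):

```shell
talosctl validate --mode cloud --config _cfgs/controlplane.yaml
```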

View File

@@ -31,18 +31,18 @@ machine:
- interface: dummy0
addresses:
- 169.254.2.53/32
- fd00::169:254:2:53/128
kubespan:
enabled: false
allowDownPeerBypass: true
extraHostEntries:
- ip: ${ipv4_vip}
aliases:
- ${apiDomain}
install:
wipe: true
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
net.ipv4.tcp_keepalive_time: 600
net.ipv4.tcp_keepalive_intvl: 60
fs.inotify.max_user_instances: 256
systemDiskEncryption:
state:
provider: luks2
@@ -56,7 +56,7 @@ cluster:
endpoint: https://${ipv4_vip}:6443
clusterName: ${clusterName}
discovery:
enabled: true
enabled: false
registries:
service:
disabled: true

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
scaleway = {
source = "scaleway/scaleway"
version = "~> 2.2.0"
version = "~> 2.2.2"
}
}
required_version = ">= 1.0"
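Bumping the constraint from `~> 2.2.0` to `~> 2.2.2` requires re-initializing an existing working directory so the lock file picks up the newer provider:

```shell
terraform init -upgrade
```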

View File

@@ -2,7 +2,7 @@
packer {
required_plugins {
scaleway = {
version = ">= 1.0.0"
version = "= 1.0.3"
source = "github.com/hashicorp/scaleway"
}
}
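With the plugin now pinned to `= 1.0.3`, the image build needs a re-init to fetch exactly that version (run from the packer directory; paths assumed):

```shell
packer init .
packer build .
```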

View File

@@ -23,7 +23,7 @@ variable "scaleway_zone" {
variable "talos_version" {
type = string
default = "v1.0.3"
default = "v1.1.0"
}
locals {