From e28e45ca6142471e92a625e45e648c12a48eabf7 Mon Sep 17 00:00:00 2001
From: Serge Logvinov
Date: Sun, 23 Oct 2022 13:14:08 +0300
Subject: [PATCH] add deployments

---
 exoscale/Makefile                            |   7 +-
 exoscale/deployments/coredns-local.yaml      | 153 ++++++
 exoscale/deployments/ingress-ns.yaml         |   4 +
 exoscale/deployments/ingress-result.yaml     | 483 +++++++++++++++++++
 exoscale/deployments/ingress.yaml            | 116 +++++
 exoscale/deployments/local-path-storage.yaml | 168 +++++++
 exoscale/deployments/metrics-server.yaml     | 195 ++++++++
 exoscale/instances-controlplane.tf           |   2 +-
 exoscale/instances-web.tf                    |   2 +-
 exoscale/instances-werker.tf                 |   4 +-
 exoscale/talos.tf                            |   4 +-
 exoscale/templates/controlplane.yaml.tpl     |   8 +
 exoscale/templates/worker.yaml.tpl           |   2 +
 13 files changed, 1138 insertions(+), 10 deletions(-)
 create mode 100644 exoscale/deployments/coredns-local.yaml
 create mode 100644 exoscale/deployments/ingress-ns.yaml
 create mode 100644 exoscale/deployments/ingress-result.yaml
 create mode 100644 exoscale/deployments/ingress.yaml
 create mode 100644 exoscale/deployments/local-path-storage.yaml
 create mode 100644 exoscale/deployments/metrics-server.yaml

diff --git a/exoscale/Makefile b/exoscale/Makefile
index 6e27296..46edd17 100644
--- a/exoscale/Makefile
+++ b/exoscale/Makefile
@@ -9,10 +9,7 @@ help:
 
 create-controlplane: ## Bootstrap first controlplane node
 	terraform refresh
-	terraform apply -target=scaleway_instance_server.controlplane -target=scaleway_vpc_public_gateway_dhcp_reservation.controlplane
-
-	sleep 30
-	@$(MAKE) create-controlplane-bootstrap
+	terraform apply -target=exoscale_instance_pool.controlplane
 
 create-infrastructure: ## Bootstrap all nodes
 	terraform apply
@@ -25,3 +22,5 @@ create-kubeconfig: ## Prepare kubeconfig
 create-deployments:
 	helm template --namespace=kube-system --version=1.12.3 -f deployments/cilium.yaml cilium \
 		cilium/cilium > deployments/cilium-result.yaml
+	helm template --namespace=ingress-nginx --version=4.3.0 -f deployments/ingress.yaml ingress-nginx \
+		ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
diff --git a/exoscale/deployments/coredns-local.yaml b/exoscale/deployments/coredns-local.yaml
new file mode 100644
index 0000000..9928643
--- /dev/null
+++ b/exoscale/deployments/coredns-local.yaml
@@ -0,0 +1,153 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns-local
+  namespace: kube-system
+data:
+  empty.db: |
+    @ 60 IN SOA localnet. root.localnet. (
+            1         ; serial
+            60        ; refresh
+            60        ; retry
+            60        ; expiry
+            60 )      ; minimum
+            ;
+    @ IN NS localnet.
+
+  hosts: |
+    # static hosts
+    169.254.2.53 dns.local
+
+  Corefile.local: |
+    (empty) {
+      file /etc/coredns/empty.db
+    }
+
+    .:53 {
+      errors
+      bind 169.254.2.53
+
+      health 127.0.0.1:8091 {
+        lameduck 5s
+      }
+
+      hosts /etc/coredns/hosts {
+        reload 60s
+        fallthrough
+      }
+
+      kubernetes cluster.local in-addr.arpa ip6.arpa {
+        endpoint https://api.cluster.local:6443
+        kubeconfig /etc/coredns/kubeconfig.conf coredns
+        pods insecure
+        ttl 60
+      }
+      prometheus :9153
+
+      forward . /etc/resolv.conf {
+        policy sequential
+        expire 30s
+      }
+
+      cache 300
+      loop
+      reload
+      loadbalance
+    }
+  kubeconfig.conf: |-
+    apiVersion: v1
+    kind: Config
+    clusters:
+    - cluster:
+        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        server: https://api.cluster.local:6443
+      name: default
+    contexts:
+    - context:
+        cluster: default
+        namespace: kube-system
+        user: coredns
+      name: coredns
+    current-context: coredns
+    users:
+    - name: coredns
+      user:
+        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: coredns-local
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns-local
+    kubernetes.io/name: CoreDNS
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  minReadySeconds: 15
+  selector:
+    matchLabels:
+      k8s-app: kube-dns-local
+      kubernetes.io/name: CoreDNS
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns-local
+        kubernetes.io/name: CoreDNS
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "9153"
+    spec:
+      priorityClassName: system-node-critical
+      serviceAccount: coredns
+      serviceAccountName: coredns
+      enableServiceLinks: false
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Exists
+        - effect: NoSchedule
+          key: node.cloudprovider.kubernetes.io/uninitialized
+          operator: Exists
+      hostNetwork: true
+      containers:
+        - name: coredns
+          image: coredns/coredns:1.9.2
+          imagePullPolicy: IfNotPresent
+          resources:
+            limits:
+              cpu: 100m
+              memory: 128Mi
+            requests:
+              cpu: 50m
+              memory: 64Mi
+          args: [ "-conf", "/etc/coredns/Corefile.local" ]
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/coredns
+              readOnly: true
+          livenessProbe:
+            httpGet:
+              host: 127.0.0.1
+              path: /health
+              port: 8091
+              scheme: HTTP
+            initialDelaySeconds: 60
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              add:
+                - NET_BIND_SERVICE
+              drop:
+                - ALL
+            readOnlyRootFilesystem: true
+      dnsPolicy: Default
+      volumes:
+        - name: config-volume
+          configMap:
+            name: coredns-local
diff --git a/exoscale/deployments/ingress-ns.yaml b/exoscale/deployments/ingress-ns.yaml
new file mode 100644
index 0000000..6878f0b
--- /dev/null
+++ b/exoscale/deployments/ingress-ns.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ingress-nginx
diff --git a/exoscale/deployments/ingress-result.yaml b/exoscale/deployments/ingress-result.yaml
new file mode 100644
index 0000000..c53c0c2
--- /dev/null
+++ b/exoscale/deployments/ingress-result.yaml
@@ -0,0 +1,483 @@
+---
+# Source: ingress-nginx/templates/controller-serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: controller
+  name: ingress-nginx
+  namespace: ingress-nginx
+automountServiceAccountToken: true
+---
+# Source: ingress-nginx/templates/controller-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: controller
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+data:
+  allow-snippet-annotations: "true"
+  client-body-timeout: "30"
+  client-header-timeout: "30"
+  enable-access-log-for-default-backend: "true"
+  error-log-level: "error"
+  hsts: "true"
+  hsts-include-subdomains: "true"
+  hsts-max-age: "31536000"
+  hsts-preload: "true"
+  http-redirect-code: "301"
+  limit-req-status-code: "429"
+  log-format-escape-json: "true"
+  log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
+  proxy-connect-timeout: "10"
+  proxy-headers-hash-bucket-size: "128"
+  proxy-hide-headers: "strict-transport-security"
+  proxy-read-timeout: "60"
+  proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
+  proxy-send-timeout: "60"
+  server-name-hash-bucket-size: "64"
+  server-name-hash-max-size: "512"
+  server-tokens: "false"
+  ssl-protocols: "TLSv1.3"
+  upstream-keepalive-connections: "32"
+  use-forwarded-headers: "true"
+  use-geoip: "false"
+  use-geoip2: "false"
+  use-gzip: "true"
+  worker-cpu-affinity: "auto"
+  worker-processes: "auto"
+---
+# Source: ingress-nginx/templates/clusterrole.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+  name: ingress-nginx
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - secrets
+      - namespaces
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingressclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - list
+      - watch
+      - get
+---
+# Source: ingress-nginx/templates/clusterrolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+  name: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: ingress-nginx
+subjects:
+  - kind: ServiceAccount
+    name: ingress-nginx
+    namespace: "ingress-nginx"
+---
+# Source: ingress-nginx/templates/controller-role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: controller
+  name: ingress-nginx
+  namespace: ingress-nginx
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - pods
+      - secrets
+      - endpoints
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingressclasses
+    verbs:
+      - get
+      - list
+      - watch
+  # TODO(Jintao Zhang)
+  # Once we release a new version of the controller,
+  # we will be able to remove the configmap related permissions
+  # We have used the Lease API for selection
+  # ref: https://github.com/kubernetes/ingress-nginx/pull/8921
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    resourceNames:
+      - ingress-controller-leader
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - create
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    resourceNames:
+      - ingress-controller-leader
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - list
+      - watch
+      - get
+---
+# Source: ingress-nginx/templates/controller-rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: controller
+  name: ingress-nginx
+  namespace: ingress-nginx
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: ingress-nginx
+subjects:
+  - kind: ServiceAccount
+    name: ingress-nginx
+    namespace: "ingress-nginx"
+---
+# Source: ingress-nginx/templates/controller-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: controller
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+spec:
+  type: ClusterIP
+  clusterIP: None
+  ipFamilyPolicy: RequireDualStack
+  ipFamilies:
+    - IPv4
+    - IPv6
+  ports:
+    - name: http
+      port: 80
+      protocol: TCP
+      targetPort: http
+      appProtocol: http
+    - name: https
+      port: 443
+      protocol: TCP
+      targetPort: https
+      appProtocol: https
+  selector:
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/component: controller
+---
+# Source: ingress-nginx/templates/controller-daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: controller
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: ingress-nginx
+      app.kubernetes.io/instance: ingress-nginx
+      app.kubernetes.io/component: controller
+  revisionHistoryLimit: 2
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate
+  minReadySeconds: 15
+  template:
+    metadata:
+      annotations:
+        prometheus.io/port: "10254"
+        prometheus.io/scrape: "true"
+      labels:
+        app.kubernetes.io/name: ingress-nginx
+        app.kubernetes.io/instance: ingress-nginx
+        app.kubernetes.io/component: controller
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+        - name: controller
+          image: "registry.k8s.io/ingress-nginx/controller:v1.4.0@sha256:34ee929b111ffc7aa426ffd409af44da48e5a0eea1eb2207994d9e0c0882d143"
+          imagePullPolicy: IfNotPresent
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                  - /wait-shutdown
+          args:
+            - /nginx-ingress-controller
+            - --election-id=ingress-controller-leader
+            - --controller-class=k8s.io/ingress-nginx
+            - --ingress-class=nginx
+            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
+          securityContext:
+            capabilities:
+              drop:
+                - ALL
+              add:
+                - NET_BIND_SERVICE
+            runAsUser: 101
+            allowPrivilegeEscalation: true
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: LD_PRELOAD
+              value: /usr/local/lib/libmimalloc.so
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: 10254
+              scheme: HTTP
+            initialDelaySeconds: 15
+            periodSeconds: 30
+            successThreshold: 1
+            timeoutSeconds: 1
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /healthz
+              port: 10254
+              scheme: HTTP
+            initialDelaySeconds: 10
+            periodSeconds: 30
+            successThreshold: 1
+            timeoutSeconds: 1
+          ports:
+            - name: http
+              containerPort: 80
+              protocol: TCP
+            - name: https
+              containerPort: 443
+              protocol: TCP
+          resources:
+            limits:
+              cpu: 1
+              memory: 1Gi
+            requests:
+              cpu: 100m
+              memory: 128Mi
+      hostNetwork: true
+      nodeSelector:
+        kubernetes.io/os: linux
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: project.io/node-pool
+                    operator: In
+                    values:
+                      - web
+      serviceAccountName: ingress-nginx
+      terminationGracePeriodSeconds: 300
+---
+# Source: ingress-nginx/templates/controller-ingressclass.yaml
+# We don't support namespaced ingressClass yet
+# So a ClusterRole and a ClusterRoleBinding is required
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  labels:
+    helm.sh/chart: ingress-nginx-4.3.0
+    app.kubernetes.io/name: ingress-nginx
+    app.kubernetes.io/instance: ingress-nginx
+    app.kubernetes.io/version: "1.4.0"
+    app.kubernetes.io/part-of: ingress-nginx
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: controller
+  name: nginx
+spec:
+  controller: k8s.io/ingress-nginx
diff --git a/exoscale/deployments/ingress.yaml b/exoscale/deployments/ingress.yaml
new file mode 100644
index 0000000..0528956
--- /dev/null
+++ b/exoscale/deployments/ingress.yaml
@@ -0,0 +1,116 @@
+
+controller:
+  kind: DaemonSet
+
+  hostNetwork: true
+  hostPort:
+    enabled: false
+    ports:
+      http: 80
+      https: 443
+
+  dnsPolicy: ClusterFirstWithHostNet
+
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate
+
+  publishService:
+    enabled: false
+
+  config:
+    worker-processes: "auto"
+    worker-cpu-affinity: "auto"
+    error-log-level: "error"
+
+    server-tokens: "false"
+    http-redirect-code: "301"
+
+    use-gzip: "true"
+    use-geoip: "false"
+    use-geoip2: "false"
+
+    use-forwarded-headers: "true"
+    # curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
+    proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
+
+    enable-access-log-for-default-backend: "true"
+    log-format-escape-json: "true"
+    log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'
+
+    upstream-keepalive-connections: "32"
+    proxy-connect-timeout: "10"
+    proxy-read-timeout: "60"
+    proxy-send-timeout: "60"
+
+    ssl-protocols: "TLSv1.3"
+    hsts: "true"
+    hsts-max-age: "31536000"
+    hsts-include-subdomains: "true"
+    hsts-preload: "true"
+    proxy-hide-headers: "strict-transport-security"
+    proxy-headers-hash-bucket-size: "128"
+
+    server-name-hash-bucket-size: "64"
+    server-name-hash-max-size: "512"
+
+    limit-req-status-code: "429"
+
+    client-header-timeout: "30"
+    client-body-timeout: "30"
+
+  minReadySeconds: 15
+
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "10254"
+
+  extraEnvs:
+    - name: NODE_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.name
+
+  livenessProbe:
+    initialDelaySeconds: 15
+    periodSeconds: 30
+  readinessProbe:
+    periodSeconds: 30
+
+  resources:
+    limits:
+      cpu: 1
+      memory: 1Gi
+    requests:
+      cpu: 100m
+      memory: 128Mi
+
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: project.io/node-pool
+                operator: In
+                values:
+                  - web
+
+  service:
+    enabled: true
+    type: ClusterIP
+    clusterIP: None
+    ipFamilyPolicy: "RequireDualStack"
+    ipFamilies:
+      - IPv4
+      - IPv6
+
+  admissionWebhooks:
+    enabled: false
+  metrics:
+    enabled: false
+
+revisionHistoryLimit: 2
+
+defaultBackend:
+  enabled: false
diff --git a/exoscale/deployments/local-path-storage.yaml b/exoscale/deployments/local-path-storage.yaml
new file mode 100644
index 0000000..3012084
--- /dev/null
+++ b/exoscale/deployments/local-path-storage.yaml
@@ -0,0 +1,168 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: local-path-storage
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: local-path-provisioner-service-account
+  namespace: local-path-storage
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: local-path-provisioner-role
+rules:
+  - apiGroups: [ "" ]
+    resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
+    verbs: [ "get", "list", "watch" ]
+  - apiGroups: [ "" ]
+    resources: [ "endpoints", "persistentvolumes", "pods" ]
+    verbs: [ "*" ]
+  - apiGroups: [ "" ]
+    resources: [ "events" ]
+    verbs: [ "create", "patch" ]
+  - apiGroups: [ "storage.k8s.io" ]
+    resources: [ "storageclasses" ]
+    verbs: [ "get", "list", "watch" ]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: local-path-provisioner-bind
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: local-path-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: local-path-provisioner-service-account
+    namespace: local-path-storage
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: local-path-provisioner
+  namespace: local-path-storage
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: local-path-provisioner
+  template:
+    metadata:
+      labels:
+        app: local-path-provisioner
+    spec:
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+        - key: "node-role.kubernetes.io/control-plane"
+          effect: NoSchedule
+      serviceAccountName: local-path-provisioner-service-account
+      containers:
+        - name: local-path-provisioner
+          image: rancher/local-path-provisioner:v0.0.19
+          imagePullPolicy: IfNotPresent
+          command:
+            - local-path-provisioner
+            - --debug
+            - start
+            - --config
+            - /etc/config/config.json
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config/
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+      volumes:
+        - name: config-volume
+          configMap:
+            name: local-path-config
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-path
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: rancher.io/local-path
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: local-path-config
+  namespace: local-path-storage
+data:
+  config.json: |-
+    {
+      "nodePathMap":[
+        {
+          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
+          "paths":["/var/local-path-provisioner"]
+        }
+      ]
+    }
+  setup: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+      case $opt in
+        p)
+          absolutePath=$OPTARG
+          ;;
+        s)
+          sizeInBytes=$OPTARG
+          ;;
+        m)
+          volMode=$OPTARG
+          ;;
+      esac
+    done
+
+    mkdir -m 0777 -p ${absolutePath}
+  teardown: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+      case $opt in
+        p)
+          absolutePath=$OPTARG
+          ;;
+        s)
+          sizeInBytes=$OPTARG
+          ;;
+        m)
+          volMode=$OPTARG
+          ;;
+      esac
+    done
+
+    rm -rf ${absolutePath}
+  helperPod.yaml: |-
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: helper-pod
+    spec:
+      priorityClassName: system-node-critical
+      tolerations:
+        - key: node.kubernetes.io/disk-pressure
+          operator: Exists
+          effect: NoSchedule
+      containers:
+        - name: helper-pod
+          image: busybox
+          imagePullPolicy: IfNotPresent
diff --git a/exoscale/deployments/metrics-server.yaml b/exoscale/deployments/metrics-server.yaml
new file mode 100644
index 0000000..69bb5ef
--- /dev/null
+++ b/exoscale/deployments/metrics-server.yaml
@@ -0,0 +1,195 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k8s-app: metrics-server
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: system:aggregated-metrics-reader
+rules:
+- apiGroups:
+  - metrics.k8s.io
+  resources:
+  - pods
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: system:metrics-server
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  - nodes/stats
+  - namespaces
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server-auth-reader
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server:system:auth-delegator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: system:metrics-server
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+spec:
+  ports:
+  - name: https
+    port: 443
+    protocol: TCP
+    targetPort: https
+  selector:
+    k8s-app: metrics-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: metrics-server
+  strategy:
+    rollingUpdate:
+      maxUnavailable: 0
+  template:
+    metadata:
+      labels:
+        k8s-app: metrics-server
+    spec:
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+        - key: "node-role.kubernetes.io/control-plane"
+          effect: NoSchedule
+      containers:
+      - args:
+        - --cert-dir=/tmp
+        - --secure-port=443
+        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+        - --kubelet-use-node-status-port
+        - --metric-resolution=15s
+        image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /livez
+            port: https
+            scheme: HTTPS
+          periodSeconds: 10
+        name: metrics-server
+        ports:
+        - containerPort: 443
+          name: https
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /readyz
+            port: https
+            scheme: HTTPS
+          initialDelaySeconds: 20
+          periodSeconds: 10
+        resources:
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        securityContext:
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          runAsUser: 1000
+        volumeMounts:
+        - mountPath: /tmp
+          name: tmp-dir
+      priorityClassName: system-cluster-critical
+      serviceAccountName: metrics-server
+      volumes:
+      - emptyDir: {}
+        name: tmp-dir
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: v1beta1.metrics.k8s.io
+spec:
+  group: metrics.k8s.io
+  groupPriorityMinimum: 100
+  insecureSkipTLSVerify: true
+  service:
+    name: metrics-server
+    namespace: kube-system
+  version: v1beta1
+  versionPriority: 100
diff --git a/exoscale/instances-controlplane.tf b/exoscale/instances-controlplane.tf
index fa14721..28f405e 100644
--- a/exoscale/instances-controlplane.tf
+++ b/exoscale/instances-controlplane.tf
@@ -20,7 +20,7 @@ resource "exoscale_instance_pool" "controlplane" {
   key_pair      = exoscale_ssh_key.terraform.name
 
   instance_type = try(var.controlplane[each.key].type, "standard.tiny")
-  disk_size     = 10
+  disk_size     = 16
   labels        = merge(var.tags, { type = "infra" })
 
diff --git a/exoscale/instances-web.tf b/exoscale/instances-web.tf
index a80ca69..b9fbe37 100644
--- a/exoscale/instances-web.tf
+++ b/exoscale/instances-web.tf
@@ -11,7 +11,7 @@ resource "exoscale_instance_pool" "web" {
   instance_prefix    = "web"
   size               = var.instances[each.key].web_count
   template_id        = data.exoscale_compute_template.debian[each.key].id
-  user_data          = base64encode(talos_machine_configuration_worker.worker[each.key].machine_config)
+  user_data          = base64encode(talos_machine_configuration_worker.web[each.key].machine_config)
   ipv6               = true
   security_group_ids = [local.network_secgroup[each.key].web, local.network_secgroup[each.key].common]
 
diff --git a/exoscale/instances-werker.tf b/exoscale/instances-werker.tf
index a31d9b9..6c5a926 100644
--- a/exoscale/instances-werker.tf
+++ b/exoscale/instances-werker.tf
@@ -6,7 +6,7 @@ resource "exoscale_instance_pool" "worker" {
   instance_prefix    = "worker"
   size               = var.instances[each.key].worker_count
   template_id        = data.exoscale_compute_template.debian[each.key].id
-  user_data          = base64encode(talos_machine_configuration_worker.worker[each.key].machine_config)
+  user_data          = base64encode(talos_machine_configuration_worker.web[each.key].machine_config)
   ipv6               = true
   security_group_ids = [local.network_secgroup[each.key].common]
 
@@ -21,7 +21,7 @@ resource "exoscale_instance_pool" "worker" {
 resource "local_sensitive_file" "worker" {
   for_each        = { for idx, name in local.regions : name => idx }
 
-  content         = talos_machine_configuration_worker.worker[each.key].machine_config
+  content         = talos_machine_configuration_worker.web[each.key].machine_config
   filename        = "_cfgs/worker-${each.key}.yaml"
   file_permission = "0600"
 }
diff --git a/exoscale/talos.tf b/exoscale/talos.tf
index 46df529..8569dc7 100644
--- a/exoscale/talos.tf
+++ b/exoscale/talos.tf
@@ -19,7 +19,7 @@ resource "talos_machine_configuration_controlplane" "controlplane" {
   ]
 }
 
-resource "talos_machine_configuration_worker" "worker" {
+resource "talos_machine_configuration_worker" "web" {
   for_each         = { for idx, name in local.regions : name => idx }
   cluster_name     = var.kubernetes["clusterName"]
   cluster_endpoint = "https://${var.kubernetes["apiDomain"]}:6443"
@@ -30,7 +30,7 @@
     templatefile("${path.module}/templates/worker.yaml.tpl", merge(var.kubernetes, {
       nodeSubnets    = local.network[each.key].cidr
       ipv4_local_vip = cidrhost(local.network[each.key].cidr, 5)
-      labels         = "topology.kubernetes.io/region=${each.key},topology.kubernetes.io/zone=${each.key}"
+      labels         = "topology.kubernetes.io/region=${each.key},topology.kubernetes.io/zone=${each.key},project.io/node-pool=web"
     }))
   ]
 }
diff --git a/exoscale/templates/controlplane.yaml.tpl b/exoscale/templates/controlplane.yaml.tpl
index fc1a854..cafdd63 100644
--- a/exoscale/templates/controlplane.yaml.tpl
+++ b/exoscale/templates/controlplane.yaml.tpl
@@ -63,3 +63,11 @@ cluster:
       - ${nodeSubnets}
     listenSubnets:
       - ${nodeSubnets}
+  externalCloudProvider:
+    enabled: true
+    manifests:
+      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/exoscale/deployments/metrics-server.yaml
+      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/exoscale/deployments/local-path-storage.yaml
+      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/exoscale/deployments/coredns-local.yaml
+      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/exoscale/deployments/ingress-ns.yaml
+      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/exoscale/deployments/ingress-result.yaml
diff --git a/exoscale/templates/worker.yaml.tpl b/exoscale/templates/worker.yaml.tpl
index 7ddbea4..4d353f3 100644
--- a/exoscale/templates/worker.yaml.tpl
+++ b/exoscale/templates/worker.yaml.tpl
@@ -38,6 +38,8 @@ machine:
         - no_read_workqueue
         - no_write_workqueue
 cluster:
+  discovery:
+    enabled: false
   network:
     dnsDomain: ${domain}
     podSubnets: ${format("%#v",split(",",podSubnets))}