diff --git a/README.md b/README.md
index 82f514a..2419485 100644
--- a/README.md
+++ b/README.md
@@ -1,91 +1 @@
# Terraform examples
-
-Local utilities
-
-* terraform
-* talosctl
-* kubectl
-* yq
-
-# Talos on Hetzner Cloud
-
-This terraform example install Talos on [HCloud](https://www.hetzner.com/cloud) with IPv4/IPv6 support.
-
-
-
-## Kubernetes addons
-
-* [cilium](https://github.com/cilium/cilium) 1.10.0
-* [kubelet-serving-cert-approver](https://github.com/alex1989hu/kubelet-serving-cert-approver)
-* [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 0.5.0
-* [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.19
-* [hcloud-cloud-controller-manage](https://github.com/hetznercloud/hcloud-cloud-controller-manager) v1.10.0
-
-## Prepare the base image
-
-First, prepare variables to your environment
-
-```shell
-export TF_VAR_hcloud_token=KEY
-```
-
-Terraform will run the VM in recovery mode, replace the base image and take a snapshote. Do not run terraform destroy after. It will delete the snapshot.
-
-```shell
-make prepare-image
-```
-
-## Install control plane
-
-Generate the default talos config
-
-```shell
-make create-config create-templates
-```
-
-open config file **terraform.tfvars** and add params
-
-```hcl
-# counts and type of kubernetes master nodes
-controlplane = {
- count = 1,
- type = "cpx11"
-}
-
-# regions to use
-regions = ["nbg1", "fsn1", "hel1"]
-
-# counts and type of worker nodes by redion
-instances = {
- "nbg1" = {
- web_count = 1,
- web_instance_type = "cx11",
- worker_count = 1,
- worker_instance_type = "cx11",
- },
- "fsn1" = {
- web_count = 1,
- web_instance_type = "cx11",
- worker_count = 1,
- worker_instance_type = "cx11",
- }
- "hel1" = {
- web_count = 1,
- web_instance_type = "cx11",
- worker_count = 1,
- worker_instance_type = "cx11",
- }
-}
-```
-
-And deploy the kubernetes master nodes
-
-```shell
-make create-controlplane
-```
-
-Then deploy all other instances
-
-```shell
-make create-infrastructure
-```
diff --git a/hetzner/README.md b/hetzner/README.md
new file mode 100644
index 0000000..82f514a
--- /dev/null
+++ b/hetzner/README.md
@@ -0,0 +1,91 @@
+# Terraform examples
+
+Required local utilities:
+
+* terraform
+* talosctl
+* kubectl
+* yq
+
+# Talos on Hetzner Cloud
+
+This Terraform example installs Talos on [HCloud](https://www.hetzner.com/cloud) with IPv4/IPv6 support.
+
+## Kubernetes addons
+
+* [cilium](https://github.com/cilium/cilium) 1.10.0
+* [kubelet-serving-cert-approver](https://github.com/alex1989hu/kubelet-serving-cert-approver)
+* [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 0.5.0
+* [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.19
+* [hcloud-cloud-controller-manager](https://github.com/hetznercloud/hcloud-cloud-controller-manager) v1.10.0
+
+## Prepare the base image
+
+First, export your HCloud API token into the environment:
+
+```shell
+export TF_VAR_hcloud_token=KEY
+```
+
+Terraform will run the VM in recovery mode, replace the base image, and take a snapshot. Do not run `terraform destroy` afterwards: it will delete the snapshot.
+
+```shell
+make prepare-image
+```
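+
+If you use the hcloud CLI (not part of this repo), you can confirm the snapshot exists:
+
+```shell
+# the Talos base image should be listed as a snapshot (assumes an authenticated hcloud CLI)
+hcloud image list --type snapshot
+```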
+
+## Install control plane
+
+Generate the default Talos config:
+
+```shell
+make create-config create-templates
+```
+
+Open the config file **terraform.tfvars** and add the parameters:
+
+```hcl
+# count and type of kubernetes master nodes
+controlplane = {
+ count = 1,
+ type = "cpx11"
+}
+
+# regions to use
+regions = ["nbg1", "fsn1", "hel1"]
+
+# count and type of worker nodes by region
+instances = {
+ "nbg1" = {
+ web_count = 1,
+ web_instance_type = "cx11",
+ worker_count = 1,
+ worker_instance_type = "cx11",
+ },
+ "fsn1" = {
+ web_count = 1,
+ web_instance_type = "cx11",
+ worker_count = 1,
+ worker_instance_type = "cx11",
+ }
+ "hel1" = {
+ web_count = 1,
+ web_instance_type = "cx11",
+ worker_count = 1,
+ worker_instance_type = "cx11",
+ }
+}
+```
+
+Then deploy the Kubernetes master nodes:
+
+```shell
+make create-controlplane
+```
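+
+Once the first control-plane node is up, you can fetch a kubeconfig with talosctl. A sketch, assuming `talosctl gen config` wrote its output to `_cfgs/`; substitute the public IP of master-1:
+
+```shell
+export TALOSCONFIG=$PWD/_cfgs/talosconfig
+talosctl config endpoint <CONTROLPLANE_IP>
+talosctl config node <CONTROLPLANE_IP>
+talosctl kubeconfig .    # writes a kubeconfig into the current directory
+```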
+
+Finally, deploy all other instances:
+
+```shell
+make create-infrastructure
+```
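+
+A quick check that all nodes joined and the addons are running:
+
+```shell
+kubectl get nodes -o wide
+kubectl get pods -n kube-system
+```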
diff --git a/scaleway/.gitignore b/scaleway/.gitignore
new file mode 100644
index 0000000..01eee69
--- /dev/null
+++ b/scaleway/.gitignore
@@ -0,0 +1,3 @@
+_cfgs/
+templates/controlplane.yaml
+*.patch
diff --git a/scaleway/Makefile b/scaleway/Makefile
new file mode 100644
index 0000000..1bdf385
--- /dev/null
+++ b/scaleway/Makefile
@@ -0,0 +1,22 @@
+
+create-config:
+ ./talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-scaleway https://127.0.0.1:6443
+
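+# Deep-merge the talosctl-generated controlplane.yaml into the local template,
+# then export the cluster vars/secrets to _cfgs/tfstate.vars and terraform.tfvars.json.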
+create-templates:
+ @yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
+ @echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
+ @echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
+ @yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
+ @yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "cluster_name: "$$1}' >> _cfgs/tfstate.vars
+ @yq eval '.machine.token' _cfgs/controlplane.yaml | awk '{ print "tokenmachine: "$$1}' >> _cfgs/tfstate.vars
+ @yq eval '.cluster.token' _cfgs/controlplane.yaml | awk '{ print "token: "$$1}' >> _cfgs/tfstate.vars
+ @yq eval '.cluster.ca.crt' _cfgs/controlplane.yaml | awk '{ print "ca: "$$1}' >> _cfgs/tfstate.vars
+
+ @yq eval -j '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json
+
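+# First apply: create only the control-plane chain (targeting the private NIC
+# also pulls in its server, IP, and network dependencies).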
+create-controlplane:
+ terraform apply -target=scaleway_instance_private_nic.controlplane
+
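+# Initialize the worker module before applying the full configuration.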
+create-infrastructure:
+ cd modules/worker && terraform init
+ terraform apply
diff --git a/scaleway/README.md b/scaleway/README.md
new file mode 100644
index 0000000..447d0e0
--- /dev/null
+++ b/scaleway/README.md
@@ -0,0 +1,47 @@
+# Terraform example for Scaleway
+
+Required local utilities:
+
+* terraform
+* talosctl
+* kubectl
+* yq
+
+## Kubernetes addons
+
+* [cilium](https://github.com/cilium/cilium) 1.10.0
+* [kubelet-serving-cert-approver](https://github.com/alex1989hu/kubelet-serving-cert-approver)
+* [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 0.5.0
+* [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.19
+
+## Prepare the base image
+
+## Install control plane
+
+Generate the default Talos config:
+
+```shell
+make create-config create-templates
+```
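+
+Behind the scenes (see the Makefile), this writes the talosctl-generated configs to `_cfgs/`, merges them into `templates/controlplane.yaml`, and exports the cluster variables and secrets to `terraform.tfvars.json`:
+
+```shell
+# sanity-check the generated artifacts
+ls _cfgs templates terraform.tfvars.json
+```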
+
+Open the config file **terraform.tfvars** and add the parameters:
+
+```hcl
+# count and type of kubernetes master nodes
+controlplane = {
+ count = 1,
+ type = "DEV1-S"
+}
+```
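+
+The same map accepts an optional `type_lb` key: any non-empty value provisions a Scaleway load balancer in front of the API servers (see `network-lb.tf`). A sketch, assuming the `LB-S` offer name:
+
+```hcl
+controlplane = {
+  count   = 1,
+  type    = "DEV1-S",
+  # non-empty type_lb enables the managed LB; LB-S is an assumed offer name
+  type_lb = "LB-S"
+}
+```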
+
+Then deploy the Kubernetes master nodes:
+
+```shell
+make create-controlplane
+```
+
+Finally, deploy all other instances:
+
+```shell
+make create-infrastructure
+```
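+
+As with the Hetzner example, you can then fetch a kubeconfig with talosctl and verify the cluster. A sketch, assuming the generated configs live in `_cfgs/`:
+
+```shell
+export TALOSCONFIG=$PWD/_cfgs/talosconfig
+talosctl config endpoint <CONTROLPLANE_IP>
+talosctl config node <CONTROLPLANE_IP>
+talosctl kubeconfig .
+kubectl get nodes -o wide
+```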
diff --git a/scaleway/auth.tf b/scaleway/auth.tf
new file mode 100644
index 0000000..b32a3dd
--- /dev/null
+++ b/scaleway/auth.tf
@@ -0,0 +1,8 @@
+
+provider "scaleway" {
+ access_key = var.scaleway_access
+ secret_key = var.scaleway_secret
+ project_id = var.scaleway_project_id
+ zone = "fr-par-1"
+ region = "fr-par"
+}
diff --git a/scaleway/common.tf b/scaleway/common.tf
new file mode 100644
index 0000000..ac2022c
--- /dev/null
+++ b/scaleway/common.tf
@@ -0,0 +1,4 @@
+
+data "scaleway_instance_image" "talos" {
+ name = "talos-system-disk"
+}
diff --git a/scaleway/deployments/cilium.yaml b/scaleway/deployments/cilium.yaml
new file mode 100644
index 0000000..642b54a
--- /dev/null
+++ b/scaleway/deployments/cilium.yaml
@@ -0,0 +1,68 @@
+---
+
+k8sServiceHost: "172.16.0.10"
+k8sServicePort: "6443"
+
+agent:
+ enabled: true
+
+operator:
+ enabled: true
+ replicas: 1
+ prometheus:
+ enabled: false
+
+identityAllocationMode: crd
+
+bpf:
+ masquerade: false
+
+cni:
+ install: true
+
+ipam:
+ mode: "kubernetes"
+
+tunnel: "vxlan"
+autoDirectNodeRoutes: false
+
+hostFirewall: true
+kubeProxyReplacement: strict
+
+healthChecking: true
+
+ipv4:
+ enabled: true
+ipv6:
+ enabled: true
+hostServices:
+ enabled: false
+hostPort:
+ enabled: true
+nodePort:
+ enabled: false
+externalIPs:
+ enabled: true
+
+k8s:
+ requireIPv4PodCIDR: true
+ requireIPv6PodCIDR: true
+
+prometheus:
+ enabled: true
+
+encryption:
+ enabled: false
+
+cgroup:
+ autoMount:
+ enabled: false
+ hostRoot: /sys/fs/cgroup
+
+resources:
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ requests:
+ cpu: 100m
+ memory: 128Mi
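+
+# These are Helm values for the cilium chart. A typical (assumed) install flow:
+#   helm repo add cilium https://helm.cilium.io/
+#   helm install cilium cilium/cilium --version 1.10.0 \
+#     --namespace kube-system -f deployments/cilium.yaml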
diff --git a/scaleway/deployments/kubelet-serving-cert-approver.yaml b/scaleway/deployments/kubelet-serving-cert-approver.yaml
new file mode 100644
index 0000000..d472d2c
--- /dev/null
+++ b/scaleway/deployments/kubelet-serving-cert-approver.yaml
@@ -0,0 +1,290 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: kubelet-serving-cert-approver
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: certificates:kubelet-serving-cert-approver
+rules:
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests/approval
+ verbs:
+ - update
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+- apiGroups:
+ - certificates.k8s.io
+ resourceNames:
+ - kubernetes.io/kubelet-serving
+ resources:
+ - signers
+ verbs:
+ - approve
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: events:kubelet-serving-cert-approver
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: psp:kubelet-serving-cert-approver
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - kubelet-serving-cert-approver
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: events:kubelet-serving-cert-approver
+ namespace: default
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: events:kubelet-serving-cert-approver
+subjects:
+- kind: ServiceAccount
+ name: kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: psp:kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:kubelet-serving-cert-approver
+subjects:
+- kind: ServiceAccount
+ name: kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: kubelet-serving-cert-approver
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: certificates:kubelet-serving-cert-approver
+subjects:
+- kind: ServiceAccount
+ name: kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+spec:
+ ports:
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: metrics
+ selector:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ spec:
+ tolerations:
+ - key: "node.cloudprovider.kubernetes.io/uninitialized"
+ value: "true"
+ effect: "NoSchedule"
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ - key: "node-role.kubernetes.io/master"
+ effect: NoSchedule
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - preference:
+ matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: DoesNotExist
+ - key: node-role.kubernetes.io/control-plane
+ operator: DoesNotExist
+ weight: 100
+ containers:
+ - args:
+ - serve
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ image: ghcr.io/alex1989hu/kubelet-serving-cert-approver:main
+ imagePullPolicy: Always
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: health
+ initialDelaySeconds: 6
+ name: cert-approver
+ ports:
+ - containerPort: 8080
+ name: health
+ - containerPort: 9090
+ name: metrics
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: health
+ initialDelaySeconds: 3
+ resources:
+ limits:
+ cpu: 250m
+ memory: 32Mi
+ requests:
+ cpu: 10m
+ memory: 12Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ privileged: false
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ priorityClassName: system-cluster-critical
+ securityContext:
+ fsGroup: 65534
+ runAsGroup: 65534
+ runAsUser: 65534
+ serviceAccountName: kubelet-serving-cert-approver
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
+ operator: Exists
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
+ labels:
+ app.kubernetes.io/instance: kubelet-serving-cert-approver
+ app.kubernetes.io/name: kubelet-serving-cert-approver
+ name: kubelet-serving-cert-approver
+ namespace: kubelet-serving-cert-approver
+spec:
+ allowPrivilegeEscalation: false
+ forbiddenSysctls:
+ - '*'
+ fsGroup:
+ ranges:
+ - max: 65534
+ min: 65534
+ rule: MustRunAs
+ hostIPC: false
+ hostNetwork: false
+ hostPID: false
+ privileged: false
+ readOnlyRootFilesystem: true
+ requiredDropCapabilities:
+ - ALL
+ runAsUser:
+ ranges:
+ - max: 65534
+ min: 65534
+ rule: MustRunAs
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ ranges:
+ - max: 65534
+ min: 65534
+ rule: MustRunAs
+ volumes:
+ - downwardAPI
+ - secret
diff --git a/scaleway/deployments/local-path-storage.yaml b/scaleway/deployments/local-path-storage.yaml
new file mode 100644
index 0000000..d7a6d4a
--- /dev/null
+++ b/scaleway/deployments/local-path-storage.yaml
@@ -0,0 +1,163 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: local-path-storage
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: local-path-provisioner-service-account
+ namespace: local-path-storage
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: local-path-provisioner-role
+rules:
+ - apiGroups: [ "" ]
+ resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
+ verbs: [ "get", "list", "watch" ]
+ - apiGroups: [ "" ]
+ resources: [ "endpoints", "persistentvolumes", "pods" ]
+ verbs: [ "*" ]
+ - apiGroups: [ "" ]
+ resources: [ "events" ]
+ verbs: [ "create", "patch" ]
+ - apiGroups: [ "storage.k8s.io" ]
+ resources: [ "storageclasses" ]
+ verbs: [ "get", "list", "watch" ]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: local-path-provisioner-bind
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: local-path-provisioner-role
+subjects:
+ - kind: ServiceAccount
+ name: local-path-provisioner-service-account
+ namespace: local-path-storage
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: local-path-provisioner
+ namespace: local-path-storage
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: local-path-provisioner
+ template:
+ metadata:
+ labels:
+ app: local-path-provisioner
+ spec:
+ tolerations:
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ - key: "node-role.kubernetes.io/master"
+ effect: NoSchedule
+ serviceAccountName: local-path-provisioner-service-account
+ containers:
+ - name: local-path-provisioner
+ image: rancher/local-path-provisioner:v0.0.19
+ imagePullPolicy: IfNotPresent
+ command:
+ - local-path-provisioner
+ - --debug
+ - start
+ - --config
+ - /etc/config/config.json
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/config/
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumes:
+ - name: config-volume
+ configMap:
+ name: local-path-config
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: local-path
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+provisioner: rancher.io/local-path
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: local-path-config
+ namespace: local-path-storage
+data:
+ config.json: |-
+ {
+ "nodePathMap":[
+ {
+ "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
+ "paths":["/var/local-path-provisioner"]
+ }
+ ]
+ }
+ setup: |-
+ #!/bin/sh
+ while getopts "m:s:p:" opt
+ do
+ case $opt in
+ p)
+ absolutePath=$OPTARG
+ ;;
+ s)
+ sizeInBytes=$OPTARG
+ ;;
+ m)
+ volMode=$OPTARG
+ ;;
+ esac
+ done
+
+ mkdir -m 0777 -p ${absolutePath}
+ teardown: |-
+ #!/bin/sh
+ while getopts "m:s:p:" opt
+ do
+ case $opt in
+ p)
+ absolutePath=$OPTARG
+ ;;
+ s)
+ sizeInBytes=$OPTARG
+ ;;
+ m)
+ volMode=$OPTARG
+ ;;
+ esac
+ done
+
+ rm -rf ${absolutePath}
+ helperPod.yaml: |-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: helper-pod
+ spec:
+ containers:
+ - name: helper-pod
+ image: busybox
+ imagePullPolicy: IfNotPresent
diff --git a/scaleway/deployments/metrics-server.yaml b/scaleway/deployments/metrics-server.yaml
new file mode 100644
index 0000000..a51b411
--- /dev/null
+++ b/scaleway/deployments/metrics-server.yaml
@@ -0,0 +1,197 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:aggregated-metrics-reader
+rules:
+- apiGroups:
+ - metrics.k8s.io
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ - nodes/stats
+ - namespaces
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server:system:auth-delegator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ k8s-app: metrics-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: metrics-server
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ k8s-app: metrics-server
+ spec:
+ tolerations:
+ - key: "CriticalAddonsOnly"
+ operator: "Exists"
+ - key: "node-role.kubernetes.io/master"
+ effect: NoSchedule
+ containers:
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --metric-resolution=15s
+ image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /livez
+ port: https
+ scheme: HTTPS
+ periodSeconds: 10
+ name: metrics-server
+ ports:
+ - containerPort: 443
+ name: https
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /readyz
+ port: https
+ scheme: HTTPS
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ securityContext:
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: metrics-server
+ volumes:
+ - emptyDir: {}
+ name: tmp-dir
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: v1beta1.metrics.k8s.io
+spec:
+ group: metrics.k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: metrics-server
+ namespace: kube-system
+ version: v1beta1
+ versionPriority: 100
diff --git a/scaleway/instances-master.tf b/scaleway/instances-master.tf
new file mode 100644
index 0000000..fd0e405
--- /dev/null
+++ b/scaleway/instances-master.tf
@@ -0,0 +1,42 @@
+
+resource "scaleway_instance_ip" "controlplane" {
+ count = lookup(var.controlplane, "count", 0)
+ # zone = element(var.regions, count.index)
+}
+
+resource "scaleway_instance_server" "controlplane" {
+ count = lookup(var.controlplane, "count", 0)
+ # zone = element(var.regions, count.index)
+ name = "master-${count.index + 1}"
+ image = data.scaleway_instance_image.talos.id
+ type = lookup(var.controlplane, "type", "DEV1-M")
+ enable_ipv6 = true
+ ip_id = scaleway_instance_ip.controlplane[count.index].id
+ security_group_id = scaleway_instance_security_group.controlplane.id
+ tags = concat(var.tags, ["infra", "master"])
+
+ user_data = {
+ cloud-init = templatefile("${path.module}/templates/controlplane.yaml",
+ merge(var.kubernetes, {
+ name = "master-${count.index + 1}"
+ type = count.index == 0 ? "init" : "controlplane"
+ lbv4 = local.lbv4
+ ipv4 = scaleway_instance_ip.controlplane[count.index].address
+ })
+ )
+ }
+
+ lifecycle {
+ ignore_changes = [
+ image,
+ type,
+ user_data,
+ ]
+ }
+}
+
+resource "scaleway_instance_private_nic" "controlplane" {
+ count = lookup(var.controlplane, "count", 0)
+ server_id = scaleway_instance_server.controlplane[count.index].id
+ private_network_id = scaleway_vpc_private_network.main.id
+}
diff --git a/scaleway/network-lb.tf b/scaleway/network-lb.tf
new file mode 100644
index 0000000..e480843
--- /dev/null
+++ b/scaleway/network-lb.tf
@@ -0,0 +1,50 @@
+
+locals {
+ lb_enable = lookup(var.controlplane, "type_lb", "") == "" ? false : true
+}
+
+locals {
+ lbv4 = local.lb_enable ? scaleway_lb_ip.lb[0].ip_address : try(scaleway_instance_ip.controlplane[0].address, "127.0.0.1")
+}
+
+resource "scaleway_lb_ip" "lb" {
+ count = local.lb_enable ? 1 : 0
+ # zone = element(var.regions, count.index)
+}
+
+resource "scaleway_lb" "lb" {
+ count = local.lb_enable ? 1 : 0
+ # name = "lb"
+ ip_id = scaleway_lb_ip.lb[0].id
+ type = lookup(var.controlplane, "type_lb", "")
+ tags = concat(var.tags, ["infra"])
+}
+
+resource "scaleway_lb_backend" "api" {
+ count = local.lb_enable ? 1 : 0
+ lb_id = scaleway_lb.lb[0].id
+ name = "api"
+ forward_protocol = "tcp"
+ forward_port = "6443"
+ server_ips = scaleway_instance_server.controlplane[*].private_ip
+
+ health_check_tcp {}
+}
+
+resource "scaleway_lb_frontend" "api" {
+ count = local.lb_enable ? 1 : 0
+ lb_id = scaleway_lb.lb[0].id
+ backend_id = scaleway_lb_backend.api[0].id
+ name = "api"
+ inbound_port = "6443"
+
+ acl {
+ name = "Allow whitlist IPs"
+ action {
+ type = "allow"
+ }
+ match {
+ ip_subnet = var.whitelist_admins
+ }
+ }
+}
diff --git a/scaleway/network-secgroup.tf b/scaleway/network-secgroup.tf
new file mode 100644
index 0000000..5037639
--- /dev/null
+++ b/scaleway/network-secgroup.tf
@@ -0,0 +1,96 @@
+
+resource "scaleway_instance_security_group" "controlplane" {
+ name = "controlplane"
+ inbound_default_policy = "drop"
+ outbound_default_policy = "accept"
+
+ dynamic "inbound_rule" {
+ for_each = ["50000", "50001", "6443", "2379", "2380"]
+
+ content {
+ action = "accept"
+ protocol = "TCP"
+ port = inbound_rule.value
+ }
+ }
+
+ dynamic "inbound_rule" {
+ for_each = ["50000", "50001", "6443"]
+
+ content {
+ action = "accept"
+ protocol = "TCP"
+ port = inbound_rule.value
+ ip_range = "::/0"
+ }
+ }
+
+ dynamic "inbound_rule" {
+ for_each = ["10250"]
+
+ content {
+ action = "accept"
+ protocol = "TCP"
+ port = inbound_rule.value
+ }
+ }
+
+ inbound_rule {
+ action = "accept"
+ protocol = "ICMP"
+ }
+}
+
+resource "scaleway_instance_security_group" "web" {
+ name = "web"
+ inbound_default_policy = "drop"
+ outbound_default_policy = "accept"
+
+ dynamic "inbound_rule" {
+ for_each = ["80", "443"]
+
+ content {
+ action = "accept"
+ protocol = "TCP"
+ port = inbound_rule.value
+ }
+ }
+
+ dynamic "inbound_rule" {
+ for_each = ["4240"]
+
+ content {
+ action = "accept"
+ protocol = "TCP"
+ port = inbound_rule.value
+ ip_range = "::/0"
+ }
+ }
+
+ inbound_rule {
+ action = "accept"
+ protocol = "ICMP"
+ }
+}
+
+resource "scaleway_instance_security_group" "worker" {
+ name = "worker"
+ inbound_default_policy = "drop"
+ outbound_default_policy = "accept"
+
+ dynamic "inbound_rule" {
+ for_each = ["4240"]
+
+ content {
+ action = "accept"
+ protocol = "TCP"
+ port = inbound_rule.value
+ ip_range = "::/0"
+ }
+ }
+
+ inbound_rule {
+ action = "accept"
+ protocol = "ICMP"
+ }
+}
diff --git a/scaleway/network.tf b/scaleway/network.tf
new file mode 100644
index 0000000..999478e
--- /dev/null
+++ b/scaleway/network.tf
@@ -0,0 +1,5 @@
+
+resource "scaleway_vpc_private_network" "main" {
+ name = "main"
+ tags = concat(var.tags, ["infra"])
+}
diff --git a/scaleway/templates/controlplane.yaml.tpl b/scaleway/templates/controlplane.yaml.tpl
new file mode 100644
index 0000000..7d178c0
--- /dev/null
+++ b/scaleway/templates/controlplane.yaml.tpl
@@ -0,0 +1,57 @@
+version: v1alpha1
+debug: false
+persist: true
+machine:
+ type: ${type}
+ certSANs:
+ - "${lbv4}"
+ - "${ipv4}"
+ kubelet:
+ extraArgs:
+ rotate-server-certificates: true
+ network:
+ hostname: "${name}"
+ interfaces:
+ - interface: eth1
+ dhcp: true
+ dhcpOptions:
+ routeMetric: 2048
+ - interface: dummy0
+ addresses:
+ - 169.254.2.53/32
+ - fd00::169:254:2:53/128
+ install:
+ wipe: false
+ sysctls:
+ net.core.somaxconn: 65535
+ net.core.netdev_max_backlog: 4096
+ systemDiskEncryption:
+ ephemeral:
+ provider: luks2
+ keys:
+ - nodeID: {}
+ slot: 0
+cluster:
+ controlPlane:
+ endpoint: https://${lbv4}:6443
+ network:
+ dnsDomain: ${domain}
+ podSubnets: ${format("[%s]",podSubnets)}
+ serviceSubnets: ${format("[%s]",serviceSubnets)}
+ proxy:
+ mode: ipvs
+ apiServer:
+ certSANs:
+ - "${lbv4}"
+ - "${ipv4}"
+ controllerManager:
+ extraArgs:
+ node-cidr-mask-size-ipv4: 24
+ node-cidr-mask-size-ipv6: 112
+ scheduler: {}
+ etcd: {}
+ extraManifests:
+ manifests:
+ - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/kubelet-serving-cert-approver.yaml
+ - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/metrics-server.yaml
+ - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/local-path-storage.yaml
diff --git a/scaleway/variables.tf b/scaleway/variables.tf
new file mode 100644
index 0000000..8515105
--- /dev/null
+++ b/scaleway/variables.tf
@@ -0,0 +1,110 @@
+
+variable "scaleway_access" {
+ description = "The scaleway access key (export TF_VAR_scaleway_access=$access_key)"
+ type = string
+ sensitive = true
+}
+
+variable "scaleway_secret" {
+ description = "The scaleway secret key (export TF_VAR_scaleway_secret=$secret_key)"
+ type = string
+ sensitive = true
+}
+
+variable "scaleway_project_id" {
+ description = "The scaleway secret key (export TF_VAR_scaleway_project_id=$project_id)"
+ type = string
+ sensitive = true
+}
+
+variable "regions" {
+ description = "The id of the scaleway region (oreder is important)"
+ type = list(string)
+ default = ["fr-par-1", "fr-par-2", "nl-ams-1"]
+}
+
+variable "kubernetes" {
+ type = map(string)
+ default = {
+    podSubnets     = "10.32.0.0/12,fd00:10:32::/102"
+ serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
+ domain = "cluster.local"
+ cluster_name = "talos-k8s-scaleway"
+ tokenmachine = ""
+ token = ""
+ ca = ""
+ }
+}
+
+variable "vpc_main_cidr" {
+ description = "Local subnet rfc1918"
+ type = string
+ default = "172.16.0.0/16"
+}
+
+variable "controlplane" {
+ description = "Property of controlplane"
+ type = map(any)
+ default = {
+ count = 0,
+ type = "DEV1-L"
+ type_lb = ""
+ }
+}
+
+variable "instances" {
+ description = "Map of region's properties"
+ type = map(any)
+ default = {
+ "nbg1" = {
+ web_count = 0,
+ web_instance_type = "DEV1-L",
+ worker_count = 0,
+ worker_instance_type = "DEV1-L",
+ },
+ "fsn1" = {
+ web_count = 0,
+ web_instance_type = "DEV1-L",
+ worker_count = 0,
+ worker_instance_type = "DEV1-L",
+ }
+ "hel1" = {
+ web_count = 0,
+ web_instance_type = "DEV1-L",
+ worker_count = 0,
+ worker_instance_type = "DEV1-L",
+ }
+ }
+}
+
+variable "tags" {
+ description = "Tags of resources"
+ type = list(string)
+ default = ["Develop"]
+}
+
+variable "whitelist_admins" {
+ description = "Whitelist for administrators"
+ default = ["0.0.0.0/0", "::/0"]
+}
+
+variable "whitelist_web" {
+ description = "Whitelist for web (default Cloudflare network)"
+ default = [
+ "173.245.48.0/20",
+ "103.21.244.0/22",
+ "103.22.200.0/22",
+ "103.31.4.0/22",
+ "141.101.64.0/18",
+ "108.162.192.0/18",
+ "190.93.240.0/20",
+ "188.114.96.0/20",
+ "197.234.240.0/22",
+ "198.41.128.0/17",
+ "162.158.0.0/15",
+ "172.64.0.0/13",
+ "131.0.72.0/22",
+ "104.16.0.0/13",
+ "104.24.0.0/14",
+ ]
+}
diff --git a/scaleway/versions.tf b/scaleway/versions.tf
new file mode 100644
index 0000000..473d031
--- /dev/null
+++ b/scaleway/versions.tf
@@ -0,0 +1,10 @@
+
+terraform {
+ required_providers {
+ scaleway = {
+ source = "scaleway/scaleway"
+ version = "~> 2.1.0"
+ }
+ }
+ required_version = ">= 1.0"
+}