Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-10-29 17:42:47 +00:00)

Commit: Switch to cilium network
@@ -20,6 +20,8 @@ create-templates:
	@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
	@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
	@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars
	@yq eval '.cluster.id' _cfgs/controlplane.yaml | awk '{ print "clusterId: "$$1}' >> _cfgs/tfstate.vars
	@yq eval '.cluster.secret' _cfgs/controlplane.yaml | awk '{ print "clusterSecret: "$$1}' >> _cfgs/tfstate.vars
	@yq eval '.machine.token' _cfgs/controlplane.yaml | awk '{ print "tokenMachine: "$$1}' >> _cfgs/tfstate.vars
	@yq eval '.machine.ca.crt' _cfgs/controlplane.yaml | awk '{ print "caMachine: "$$1}' >> _cfgs/tfstate.vars
	@yq eval '.cluster.token' _cfgs/controlplane.yaml | awk '{ print "token: "$$1}' >> _cfgs/tfstate.vars

@@ -35,3 +37,6 @@ create-infrastructure: ## Bootstrap all nodes

create-kubeconfig: ## Prepare kubeconfig
	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig .

create-deployments:
	helm template --namespace=kube-system --version=1.11.1 -f deployments/cilium.yaml cilium cilium/cilium > deployments/cilium_result.yaml
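The new targets are meant to be run after the cluster is bootstrapped: one pulls the admin kubeconfig, the other renders the Cilium manifest that the Talos machine config references further down. A minimal usage sketch (the helm repo add prerequisite and the manual kubectl apply fallback are assumptions, not part of the Makefile):

    helm repo add cilium https://helm.cilium.io/    # assumed prerequisite for the cilium/cilium chart
    make create-kubeconfig                          # fetches the kubeconfig from node 172.16.0.11 via talosctl
    make create-deployments                         # renders deployments/cilium_result.yaml
    # assumed fallback if the nodes cannot fetch the manifest from GitHub themselves:
    kubectl --kubeconfig=kubeconfig -n kube-system apply -f deployments/cilium_result.yaml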
@@ -1,6 +1,6 @@
---

k8sServiceHost: "172.16.0.10"
k8sServiceHost: "172.16.0.5"
k8sServicePort: "6443"

agent:
@@ -13,24 +13,27 @@ operator:
  enabled: false

identityAllocationMode: crd
kubeProxyReplacement: strict
enableK8sEndpointSlice: true
localRedirectPolicy: true

bpf:
  masquerade: false
healthChecking: true

tunnel: "vxlan"
autoDirectNodeRoutes: false
# devices: [eth0]

cni:
  install: true

ipam:
  mode: "kubernetes"
k8s:
  requireIPv4PodCIDR: true
  requireIPv6PodCIDR: true

tunnel: "vxlan"
autoDirectNodeRoutes: false

hostFirewall: true
kubeProxyReplacement: strict

healthChecking: true

bpf:
  masquerade: false
ipv4:
  enabled: true
ipv6:
@@ -43,17 +46,15 @@ nodePort:
  enabled: false
externalIPs:
  enabled: true
hostFirewall:
  enabled: true

k8s:
  requireIPv4PodCIDR: true
  requireIPv6PodCIDR: true
hubble:
  enabled: false

prometheus:
  enabled: true

encryption:
  enabled: false

cgroup:
  autoMount:
    enabled: false
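With kubeProxyReplacement set to strict and kube-proxy disabled in the Talos templates below, the agent reaches the API server only through the k8sServiceHost/k8sServicePort pair above, so that address must stay in sync with the control-plane endpoint. A quick post-install check (assumed command using the stock cilium CLI inside the agent pod, not part of this commit):

    kubectl -n kube-system exec ds/cilium -- cilium status | grep -i kubeproxyreplacement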
scaleway/deployments/cilium_result.yaml (new file, 782 lines)
@@ -0,0 +1,782 @@
---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:

  # Identity allocation mode selects how identities are shared between cilium
  # nodes by setting how they are stored. The options are "crd" or "kvstore".
  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
  #   These can be queried with:
  #     kubectl get ciliumid
  # - "kvstore" stores identities in an etcd kvstore, that is
  #   configured below. Cilium versions before 1.6 supported only the kvstore
  #   backend. Upgrades from these older cilium versions should continue using
  #   the kvstore by commenting out the identity-allocation-mode below, or
  #   setting it to "kvstore".
  identity-allocation-mode: crd
  cilium-endpoint-gc-interval: "5m0s"
  # Disable the usage of CiliumEndpoint CRD
  disable-endpoint-crd: "false"

  # If you want to run cilium in debug mode change this value to true
  debug: "false"
  # The agent can be put into the following three policy enforcement modes
  # default, always and never.
  # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
  enable-policy: "default"
  # If you want metrics enabled in all of your Cilium agents, set the port for
  # which the Cilium agents will have their metrics exposed.
  # This option deprecates the "prometheus-serve-addr" in the
  # "cilium-metrics-config" ConfigMap
  # NOTE that this will open the port on ALL nodes where Cilium pods are
  # scheduled.
  prometheus-serve-addr: ":9090"
  # Port to expose Envoy metrics (e.g. "9095"). Envoy metrics listener will be disabled if this
  # field is not set.
  proxy-prometheus-port: "9095"

  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
  # address.
  enable-ipv4: "true"

  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
  # address.
  enable-ipv6: "true"
  # Users who wish to specify their own custom CNI configuration file must set
  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
  custom-cni-conf: "false"
  enable-bpf-clock-probe: "true"
  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the less packets
  # that will be seen in monitor output.
  monitor-aggregation: medium

  # The monitor aggregation interval governs the typical time between monitor
  # notification events for each allowed connection.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-interval: 5s

  # The monitor aggregation flags determine which TCP flags which, upon the
  # first observation, cause monitor notifications to be generated.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-flags: all
  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
  bpf-map-dynamic-size-ratio: "0.0025"
  # bpf-policy-map-max specifies the maximum number of entries in endpoint
  # policy map (per endpoint)
  bpf-policy-map-max: "16384"
  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
  # backend and affinity maps.
  bpf-lb-map-max: "65536"
  # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass
  # optimization for nodeport reverse NAT handling.
  bpf-lb-external-clusterip: "false"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # As a result, reply packets may be dropped and the load-balancing decisions
  # for established connections may change.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "false"

  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"

  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: default
  # Unique ID of the cluster. Must be unique across all conneted clusters and
  # in the range of 1 and 255. Only relevant when building a mesh of clusters.
  cluster-id: ""

  # Encapsulation mode for communication between nodes
  # Possible values:
  # - disabled
  # - vxlan (default)
  # - geneve
  tunnel: vxlan
  # Enables L7 proxy for L7 policy enforcement and visibility
  enable-l7-proxy: "true"

  enable-ipv4-masquerade: "true"
  enable-ipv6-masquerade: "true"
  enable-bpf-masquerade: "false"

  enable-xt-socket-fallback: "true"
  install-iptables-rules: "true"
  install-no-conntrack-iptables-rules: "false"

  auto-direct-node-routes: "false"
  enable-bandwidth-manager: "false"
  enable-local-redirect-policy: "true"
  enable-host-firewall: "true"

  kube-proxy-replacement: "strict"
  kube-proxy-replacement-healthz-bind-address: ""
  enable-health-check-nodeport: "true"
  node-port-bind-protection: "true"
  enable-auto-protect-node-port-range: "true"
  enable-session-affinity: "true"
  enable-l2-neigh-discovery: "true"
  k8s-require-ipv4-pod-cidr: "true"
  k8s-require-ipv6-pod-cidr: "true"
  enable-endpoint-health-checking: "true"
  enable-health-checking: "true"
  enable-well-known-identities: "false"
  enable-remote-node-identity: "true"
  operator-api-serve-addr: "127.0.0.1:9234"
  ipam: "kubernetes"
  disable-cnp-status-updates: "true"
  enable-k8s-endpoint-slice: "true"
  cgroup-root: "/sys/fs/cgroup"
  enable-k8s-terminating-endpoint: "true"
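The ConfigMap above is what the agents actually read; once the manifest is applied, a one-line sanity check that the strict kube-proxy replacement made it into the cluster could look like this (assumed verification step, not from the repo):

    kubectl -n kube-system get configmap cilium-config -o jsonpath='{.data.kube-proxy-replacement}{"\n"}'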
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium
rules:
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  - services
  - nodes
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - pods
  - pods/finalizers
  verbs:
  - get
  - list
  - watch
  - update
  - delete
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  # Deprecated for removal in v1.10
  - create
  - list
  - watch
  - update

  # This is used when validating policies in preflight. This will need to stay
  # until we figure out how to avoid "get" inside the preflight, and then
  # should be removed ideally.
  - get
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumnetworkpolicies/status
  - ciliumnetworkpolicies/finalizers
  - ciliumclusterwidenetworkpolicies
  - ciliumclusterwidenetworkpolicies/status
  - ciliumclusterwidenetworkpolicies/finalizers
  - ciliumendpoints
  - ciliumendpoints/status
  - ciliumendpoints/finalizers
  - ciliumnodes
  - ciliumnodes/status
  - ciliumnodes/finalizers
  - ciliumidentities
  - ciliumidentities/finalizers
  - ciliumlocalredirectpolicies
  - ciliumlocalredirectpolicies/status
  - ciliumlocalredirectpolicies/finalizers
  - ciliumegressnatpolicies
  - ciliumendpointslices
  verbs:
  - '*'
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
rules:
- apiGroups:
  - ""
  resources:
  # to automatically delete [core|kube]dns pods so that are starting to being
  # managed by Cilium
  - pods
  verbs:
  - get
  - list
  - watch
  - delete
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform LB IP allocation for BGP
  - services/status
  verbs:
  - update
- apiGroups:
  - ""
  resources:
  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
  - services
  - endpoints
  # to check apiserver connectivity
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumnetworkpolicies/status
  - ciliumnetworkpolicies/finalizers
  - ciliumclusterwidenetworkpolicies
  - ciliumclusterwidenetworkpolicies/status
  - ciliumclusterwidenetworkpolicies/finalizers
  - ciliumendpoints
  - ciliumendpoints/status
  - ciliumendpoints/finalizers
  - ciliumnodes
  - ciliumnodes/status
  - ciliumnodes/finalizers
  - ciliumidentities
  - ciliumendpointslices
  - ciliumidentities/status
  - ciliumidentities/finalizers
  - ciliumlocalredirectpolicies
  - ciliumlocalredirectpolicies/status
  - ciliumlocalredirectpolicies/finalizers
  verbs:
  - '*'
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - list
  - update
  - watch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - get
  - update
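If the RBAC objects are suspected of being out of date after an upgrade, the bindings can be probed directly; an assumed verification command, not from the repo:

    kubectl auth can-i list ciliumendpoints.cilium.io --as=system:serviceaccount:kube-system:cilium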
---
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium
subjects:
- kind: ServiceAccount
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-operator
subjects:
- kind: ServiceAccount
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-agent/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: cilium-agent
  namespace: kube-system
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9095"
  labels:
    k8s-app: cilium
spec:
  clusterIP: None
  type: ClusterIP
  selector:
    k8s-app: cilium
  ports:
  - name: envoy-metrics
    port: 9095
    protocol: TCP
    targetPort: envoy-metrics
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium
  namespace: kube-system
  labels:
    k8s-app: cilium
spec:
  selector:
    matchLabels:
      k8s-app: cilium
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
  template:
    metadata:
      annotations:
        prometheus.io/port: "9090"
        prometheus.io/scrape: "true"
        # This annotation plus the CriticalAddonsOnly toleration makes
        # cilium to be a critical pod in the cluster, which ensures cilium
        # gets priority scheduling.
        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        k8s-app: cilium
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
            - matchExpressions:
              - key: beta.kubernetes.io/os
                operator: In
                values:
                - linux
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: k8s-app
                operator: In
                values:
                - cilium
            topologyKey: kubernetes.io/hostname
      containers:
      - name: cilium-agent
        image: "quay.io/cilium/cilium:v1.11.1@sha256:251ff274acf22fd2067b29a31e9fda94253d2961c061577203621583d7e85bd2"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-agent
        args:
        - --config-dir=/tmp/cilium/config-map
        startupProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 105
          periodSeconds: 2
          successThreshold: 1
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 10
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9876
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 3
          timeoutSeconds: 5
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        - name: CILIUM_CNI_CHAINING_MODE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: cni-chaining-mode
              optional: true
        - name: CILIUM_CUSTOM_CNI_CONF
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: custom-cni-conf
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "172.16.0.5"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        lifecycle:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
              - "--enable-debug=false"
              - "--cni-exclusive=true"
          preStop:
            exec:
              command:
              - /cni-uninstall.sh
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
        ports:
        - name: prometheus
          containerPort: 9090
          hostPort: 9090
          protocol: TCP
        - name: envoy-metrics
          containerPort: 9095
          hostPort: 9095
          protocol: TCP
        securityContext:
          privileged: true
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
          mountPropagation: Bidirectional
        # Check for duplicate mounts before mounting
        - name: cilium-cgroup
          mountPath: /sys/fs/cgroup
        - name: cilium-run
          mountPath: /var/run/cilium
        - name: cni-path
          mountPath: /host/opt/cni/bin
        - name: etc-cni-netd
          mountPath: /host/etc/cni/net.d
        - name: clustermesh-secrets
          mountPath: /var/lib/cilium/clustermesh
          readOnly: true
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        # Needed to be able to load kernel modules
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: xtables-lock
          mountPath: /run/xtables.lock
      hostNetwork: true
      initContainers:
      - name: clean-cilium-state
        image: "quay.io/cilium/cilium:v1.11.1@sha256:251ff274acf22fd2067b29a31e9fda94253d2961c061577203621583d7e85bd2"
        imagePullPolicy: IfNotPresent
        command:
        - /init-container.sh
        env:
        - name: CILIUM_ALL_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-state
              optional: true
        - name: CILIUM_BPF_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-bpf-state
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "172.16.0.5"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        securityContext:
          privileged: true
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
        # Required to mount cgroup filesystem from the host to cilium agent pod
        - name: cilium-cgroup
          mountPath: /sys/fs/cgroup
          mountPropagation: HostToContainer
        - name: cilium-run
          mountPath: /var/run/cilium
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccount: "cilium"
      serviceAccountName: "cilium"
      terminationGracePeriodSeconds: 1
      tolerations:
      - operator: Exists
      volumes:
      # To keep state between restarts / upgrades
      - name: cilium-run
        hostPath:
          path: /var/run/cilium
          type: DirectoryOrCreate
      # To keep state between restarts / upgrades for bpf maps
      - name: bpf-maps
        hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
      # To keep state between restarts / upgrades for cgroup2 filesystem
      - name: cilium-cgroup
        hostPath:
          path: /sys/fs/cgroup
          type: DirectoryOrCreate
      # To install cilium cni plugin in the host
      - name: cni-path
        hostPath:
          path: /opt/cni/bin
          type: DirectoryOrCreate
      # To install cilium cni configuration in the host
      - name: etc-cni-netd
        hostPath:
          path: /etc/cni/net.d
          type: DirectoryOrCreate
      # To be able to load kernel modules
      - name: lib-modules
        hostPath:
          path: /lib/modules
      # To access iptables concurrently with other processes (e.g. kube-proxy)
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        secret:
          secretName: cilium-clustermesh
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          optional: true
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
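Once this DaemonSet is applied (by hand or fetched by Talos via the cni.urls entry below), the rollout can be watched with standard kubectl commands; assumed usage examples, not part of the repository:

    kubectl -n kube-system rollout status daemonset/cilium
    kubectl -n kube-system get pods -l k8s-app=cilium -o wide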
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: kube-system
  labels:
    io.cilium/app: operator
    name: cilium-operator
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
  replicas: 1
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
      labels:
        io.cilium/app: operator
        name: cilium-operator
    spec:
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: io.cilium/app
                operator: In
                values:
                - operator
            topologyKey: kubernetes.io/hostname
      containers:
      - name: cilium-operator
        image: quay.io/cilium/operator-generic:v1.11.1@sha256:977240a4783c7be821e215ead515da3093a10f4a7baea9f803511a2c2b44a235
        imagePullPolicy: IfNotPresent
        command:
        - cilium-operator-generic
        args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "172.16.0.5"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        volumeMounts:
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
      hostNetwork: true
      restartPolicy: Always
      priorityClassName: system-cluster-critical
      serviceAccount: "cilium-operator"
      serviceAccountName: "cilium-operator"
      tolerations:
      - operator: Exists
      volumes:
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
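After both the agent and the operator are up, a short health check from inside one of the agent pods confirms the datapath is wired up; a hedged example using the in-pod cilium CLI, not something shipped in this commit:

    kubectl -n kube-system rollout status deployment/cilium-operator
    kubectl -n kube-system exec ds/cilium -- cilium status --brief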
@@ -4,14 +4,15 @@ resource "scaleway_instance_ip" "controlplane" {
}

resource "scaleway_instance_server" "controlplane" {
  count             = lookup(var.controlplane, "count", 0)
  name              = "master-${count.index + 1}"
  image             = data.scaleway_instance_image.talos.id
  type              = lookup(var.controlplane, "type", "DEV1-M")
  enable_ipv6       = true
  ip_id             = scaleway_instance_ip.controlplane[count.index].id
  security_group_id = scaleway_instance_security_group.controlplane.id
  tags              = concat(var.tags, ["infra", "master"])
  count              = lookup(var.controlplane, "count", 0)
  name               = "master-${count.index + 1}"
  image              = data.scaleway_instance_image.talos.id
  type               = lookup(var.controlplane, "type", "DEV1-M")
  enable_ipv6        = true
  ip_id              = scaleway_instance_ip.controlplane[count.index].id
  security_group_id  = scaleway_instance_security_group.controlplane.id
  placement_group_id = scaleway_instance_placement_group.controlplane.id
  tags               = concat(var.tags, ["infra", "master"])

  private_network {
    pn_id = scaleway_vpc_private_network.main.id
@@ -39,3 +40,9 @@ resource "scaleway_instance_server" "controlplane" {
    ]
  }
}

resource "scaleway_instance_placement_group" "controlplane" {
  name        = "controlplane"
  policy_type = "max_availability"
  policy_mode = "enforced"
}
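The max_availability placement group asks Scaleway to spread the control-plane servers across hypervisors, which is why the instance resource now references it. The change can be rolled out incrementally; assumed commands, resource addresses taken from the diff above:

    terraform plan  -target=scaleway_instance_placement_group.controlplane
    terraform apply -target=scaleway_instance_server.controlplane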
scaleway/instances-web.tf (new file, 37 lines)
@@ -0,0 +1,37 @@
# FIXME: does not work without enable_dynamic_ip

resource "scaleway_instance_server" "web" {
  count             = lookup(var.instances, "web_count", 0)
  name              = "web-${count.index + 1}"
  image             = data.scaleway_instance_image.talos.id
  type              = lookup(var.instances, "web_instance_type", "DEV1-M")
  enable_ipv6       = true
  enable_dynamic_ip = true
  tags              = concat(var.tags, ["web"])

  private_network {
    pn_id = scaleway_vpc_private_network.main.id
  }

  user_data = {
    cloud-init = templatefile("${path.module}/templates/web.yaml.tpl",
      merge(var.kubernetes, {
        name        = "web-${count.index + 1}"
        type        = "worker"
        ipv4_vip    = local.ipv4_vip
        clusterDns  = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
        nodeSubnets = local.main_subnet
        labels      = "topology.kubernetes.io/region=fr-par"
      })
    )
  }

  lifecycle {
    ignore_changes = [
      image,
      type,
      user_data,
    ]
  }
}
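Worker count and size come from var.instances; an illustrative invocation (the values and the exact variable type are assumptions, not defined in this diff):

    terraform apply -var='instances={web_count=2, web_instance_type="DEV1-M"}'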
@@ -16,8 +16,15 @@ resource "scaleway_vpc_public_gateway" "main" {
}

resource "scaleway_vpc_public_gateway_dhcp" "main" {
  subnet   = local.main_subnet
  pool_low = cidrhost(local.main_subnet, 16)
  subnet             = local.main_subnet
  push_default_route = true
  pool_low           = cidrhost(local.main_subnet, 16)

  lifecycle {
    ignore_changes = [
      dns_server_override
    ]
  }
}

resource "scaleway_vpc_private_network" "main" {
@@ -30,6 +30,8 @@ machine:
        addresses:
          - 169.254.2.53/32
          - fd00::169:254:2:53/128
    kubespan:
      enabled: true
  install:
    wipe: false
  sysctls:
@@ -52,12 +54,18 @@ machine:
cluster:
  controlPlane:
    endpoint: https://${ipv4_vip}:6443
  discovery:
    enabled: true
  network:
    dnsDomain: ${domain}
    podSubnets: ${format("%#v",split(",",podSubnets))}
    serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
    cni:
      name: custom
      urls:
        - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/cilium_result.yaml
  proxy:
    mode: ipvs
    disabled: true
  apiServer:
    certSANs:
      - "${lbv4}"
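This is the heart of the commit: kube-proxy is turned off and the CNI is set to custom, with Talos pulling the rendered Cilium manifest from the repository URL at bootstrap. That URL therefore has to be reachable from the nodes; a quick pre-flight check from a workstation (assumed, not part of the templates):

    curl -fsI https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/scaleway/deployments/cilium_result.yaml | head -n 1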
scaleway/templates/web.yaml.tpl (new file, 63 lines)
@@ -0,0 +1,63 @@
version: v1alpha1
debug: false
persist: true
machine:
  type: worker
  token: ${tokenMachine}
  ca:
    crt: ${caMachine}
  certSANs: []
  kubelet:
    extraArgs:
      cloud-provider: external
      rotate-server-certificates: true
      node-labels: ${labels}
    clusterDNS:
      - 169.254.2.53
      - ${clusterDns}
    nodeIP:
      validSubnets: ${format("%#v",split(",",nodeSubnets))}
  network:
    interfaces:
      - interface: eth0
        dhcp: true
      - interface: eth1
        dhcp: true
        dhcpOptions:
          routeMetric: 2048
      - interface: dummy0
        addresses:
          - 169.254.2.53/32
          - fd00::169:254:2:53/128
    kubespan:
      enabled: true
  install:
    wipe: true
  sysctls:
    net.core.somaxconn: 65535
    net.core.netdev_max_backlog: 4096
    net.ipv4.tcp_keepalive_time: 600
    net.ipv4.tcp_keepalive_intvl: 60
    fs.inotify.max_user_instances: 256
  systemDiskEncryption:
    state:
      provider: luks2
      keys:
        - nodeID: {}
          slot: 0
cluster:
  id: ${clusterID}
  secret: ${clusterSecret}
  controlPlane:
    endpoint: https://${ipv4_vip}:6443
  clusterName: ${clusterName}
  discovery:
    enabled: true
  network:
    dnsDomain: ${domain}
    serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
  proxy:
    disabled: true
  token: ${token}
  ca:
    crt: ${ca}
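A rendered copy of this template can be linted before the instance is created; a hedged example with talosctl (the local file name web-1.yaml is an assumption):

    talosctl validate --mode cloud --config web-1.yaml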
@@ -32,6 +32,8 @@ variable "kubernetes" {
    domain        = "cluster.local"
    apiDomain     = "api.cluster.local"
    clusterName   = "talos-k8s-hetzner"
    clusterID     = ""
    clusterSecret = ""
    tokenMachine  = ""
    caMachine     = ""
    token         = ""