refactoring

Serge Logvinov
2023-04-15 16:42:38 +03:00
parent 0451531353
commit d9140bdbc2
20 changed files with 238 additions and 2241 deletions

View File

@@ -1,5 +1,6 @@
-ENDPOINT:=${shell terraform output -raw controlplane_endpoint 2>/dev/null}
+ENDPOINT:=api.cluster.local
+# ENDPOINT:=${shell terraform output -raw controlplane_endpoint 2>/dev/null}
 CLUSTERNAME:="talos-k8s-hetzner"
 help:
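The endpoint is now pinned to the static name api.cluster.local (the same name the machine configs and manifests below use for the API server), and the Terraform lookup is kept as a commented-out fallback. For reference, a sketch of what that commented line assumes on the Terraform side; the real output may differ:

```hcl
# Hypothetical output read by `terraform output -raw controlplane_endpoint`;
# shape assumed from the locals in lb.tf below.
output "controlplane_endpoint" {
  description = "Public endpoint of the Kubernetes API"
  value       = local.lb_enable ? hcloud_load_balancer.api[0].ipv4 : hcloud_floating_ip.api[0].ip_address
}
```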

View File

@@ -1,6 +1,8 @@
data "hcloud_image" "talos" {
with_selector = "type=infra"
for_each = toset(["amd64", "arm64"])
with_architecture = each.key == "amd64" ? "x86" : "arm"
with_selector = "type=infra"
}
resource "hcloud_ssh_key" "infra" {

View File

@@ -1,889 +0,0 @@
---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-operator"
namespace: kube-system
---
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# kubectl get ciliumid
# - "kvstore" stores identities in an etcd kvstore, that is
# configured below. Cilium versions before 1.6 supported only the kvstore
# backend. Upgrades from these older cilium versions should continue using
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
identity-allocation-mode: crd
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
skip-cnp-status-startup-clean: "false"
# Disable the usage of CiliumEndpoint CRD
disable-endpoint-crd: "false"
# If you want to run cilium in debug mode change this value to true
debug: "false"
# The agent can be put into the following three policy enforcement modes
# default, always and never.
# https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
enable-policy: "default"
# If you want metrics enabled in all of your Cilium agents, set the port
# on which the Cilium agents will expose their metrics.
# This option deprecates the "prometheus-serve-addr" in the
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
prometheus-serve-addr: ":9962"
# Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
# field is not set.
proxy-prometheus-port: "9964"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "true"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
enable-bpf-clock-probe: "true"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the fewer packets
# will be seen in monitor output.
monitor-aggregation: medium
# The monitor aggregation interval governs the typical time between monitor
# notification events for each allowed connection.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-interval: 5s
# The monitor aggregation flags determine which TCP flags, upon the
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-flags: all
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
bpf-map-dynamic-size-ratio: "0.0025"
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "65536"
# bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass
# optimization for nodeport reverse NAT handling.
bpf-lb-external-clusterip: "false"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# As a result, reply packets may be dropped and the load-balancing decisions
# for established connections may change.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
cluster-id: "0"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: "vxlan"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
enable-ipv4-masquerade: "true"
enable-ipv6-masquerade: "true"
enable-bpf-masquerade: "false"
enable-xt-socket-fallback: "true"
install-iptables-rules: "true"
install-no-conntrack-iptables-rules: "false"
auto-direct-node-routes: "false"
enable-local-redirect-policy: "true"
enable-host-firewall: "true"
# List of devices used to attach bpf_host.o (implements BPF NodePort,
# host-firewall and BPF masquerading)
devices: "eth+"
kube-proxy-replacement: "strict"
kube-proxy-replacement-healthz-bind-address: ""
bpf-lb-sock: "false"
host-reachable-services-protos:
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "true"
k8s-require-ipv6-pod-cidr: "true"
enable-endpoint-health-checking: "true"
enable-health-checking: "true"
enable-well-known-identities: "false"
enable-remote-node-identity: "true"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
ipam: "kubernetes"
disable-cnp-status-updates: "true"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
enable-k8s-endpoint-slice: "true"
enable-bgp-control-plane: "false"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/sys/fs/cgroup"
enable-k8s-terminating-endpoint: "true"
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-min-ttl: "3600"
tofqdns-proxy-response-max-delay: "100ms"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- pods
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- list
- watch
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then it
# should ideally be removed.
- get
- apiGroups:
- cilium.io
resources:
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
verbs:
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
- ciliumendpoints
- ciliumnodes
verbs:
- create
- apiGroups:
- cilium.io
# To synchronize garbage collection of such resources
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
verbs:
- delete
- get
- apiGroups:
- cilium.io
resources:
- ciliumnodes
- ciliumnodes/status
verbs:
- get
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
verbs:
- patch
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform LB IP allocation for BGP
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumclusterwidenetworkpolicies
verbs:
# Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
- create
- update
- deletecollection
# To update the status of the CNPs and CCNPs
- patch
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
verbs:
# Update the auto-generated CNPs and CCNPs status.
- patch
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
- ciliumidentities
verbs:
# To perform garbage collection of such resources
- delete
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
# To synchronize garbage collection of such resources
- update
- apiGroups:
- cilium.io
resources:
- ciliumnodes
verbs:
- create
- update
- get
- list
- watch
# To perform CiliumNode garbage collection
- delete
- apiGroups:
- cilium.io
resources:
- ciliumnodes/status
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
verbs:
- create
- update
- get
- list
- watch
- delete
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumbgploadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
---
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system
---
# Source: cilium/templates/cilium-agent/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cilium-agent
namespace: kube-system
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9964"
labels:
k8s-app: cilium
spec:
clusterIP: None
type: ClusterIP
selector:
k8s-app: cilium
ports:
- name: envoy-metrics
port: 9964
protocol: TCP
targetPort: envoy-metrics
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
labels:
k8s-app: cilium
spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "9962"
prometheus.io/scrape: "true"
labels:
k8s-app: cilium
spec:
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
args:
- --config-dir=/tmp/cilium/config-map
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: CILIUM_CNI_CHAINING_MODE
valueFrom:
configMapKeyRef:
name: cilium-config
key: cni-chaining-mode
optional: true
- name: CILIUM_CUSTOM_CNI_CONF
valueFrom:
configMapKeyRef:
name: cilium-config
key: custom-cni-conf
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
lifecycle:
postStart:
exec:
command:
- "/cni-install.sh"
- "--enable-debug=false"
- "--cni-exclusive=true"
- "--log-file=/var/run/cilium/cilium-cni.log"
preStop:
exec:
command:
- /cni-uninstall.sh
resources:
limits:
cpu: 2
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi
ports:
- name: peer-service
containerPort: 4244
hostPort: 4244
protocol: TCP
- name: prometheus
containerPort: 9962
hostPort: 9962
protocol: TCP
- name: envoy-metrics
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
# Check for duplicate mounts before mounting
- name: cilium-cgroup
mountPath: /sys/fs/cgroup
- name: cilium-run
mountPath: /var/run/cilium
- name: cni-path
mountPath: /host/opt/cni/bin
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
- name: clustermesh-secrets
mountPath: /var/lib/cilium/clustermesh
readOnly: true
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
# Needed to be able to load kernel modules
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: /sys/fs/cgroup
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium
resources:
requests:
cpu: 100m
memory: 100Mi # wait-for-kube-proxy
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: "cilium"
serviceAccountName: "cilium"
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
# To keep state between restarts / upgrades for cgroup2 filesystem
- name: cilium-cgroup
hostPath:
path: /sys/fs/cgroup
type: DirectoryOrCreate
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
# To be able to load kernel modules
- name: lib-modules
hostPath:
path: /lib/modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# To read the clustermesh configuration
- name: clustermesh-secrets
secret:
secretName: cilium-clustermesh
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
optional: true
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
spec:
# See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
# for more details.
replicas: 1
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
# ensure pods roll when configmap updates
cilium.io/cilium-configmap-checksum: "93ed3047796c548140dd014145d2cb313155de38c36595eb2f05f60856400ae5"
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "api.cluster.local"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
terminationMessagePolicy: FallbackToLogsOnError
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
serviceAccount: "cilium-operator"
serviceAccountName: "cilium-operator"
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config

View File

@@ -1,77 +0,0 @@
---
k8sServiceHost: "api.cluster.local"
k8sServicePort: "6443"
operator:
enabled: true
rollOutPods: true
replicas: 1
prometheus:
enabled: false
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- operator: Exists
effect: NoSchedule
identityAllocationMode: crd
kubeProxyReplacement: strict
enableK8sEndpointSlice: true
localRedirectPolicy: true
tunnel: "vxlan"
autoDirectNodeRoutes: false
devices: [eth+]
healthChecking: true
cni:
install: true
ipam:
mode: "kubernetes"
k8s:
requireIPv4PodCIDR: true
requireIPv6PodCIDR: true
bpf:
masquerade: false
ipv4:
enabled: true
ipv6:
enabled: true
hostServices:
enabled: true
hostPort:
enabled: true
nodePort:
enabled: true
externalIPs:
enabled: true
hostFirewall:
enabled: true
ingressController:
enabled: false
securityContext:
privileged: true
hubble:
enabled: false
prometheus:
enabled: true
cgroup:
autoMount:
enabled: false
hostRoot: /sys/fs/cgroup
resources:
limits:
cpu: 2
memory: 2Gi
requests:
cpu: 100m
memory: 128Mi

View File

@@ -1,153 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns-local
namespace: kube-system
data:
empty.db: |
@ 60 IN SOA localnet. root.localnet. (
1 ; serial
60 ; refresh
60 ; retry
60 ; expiry
60 ) ; minimum
;
@ IN NS localnet.
hosts: |
# static hosts
169.254.2.53 dns.local
Corefile.local: |
(empty) {
file /etc/coredns/empty.db
}
.:53 {
errors
bind 169.254.2.53
health 127.0.0.1:8091 {
lameduck 5s
}
hosts /etc/coredns/hosts {
reload 60s
fallthrough
}
kubernetes cluster.local in-addr.arpa ip6.arpa {
endpoint https://api.cluster.local:6443
kubeconfig /etc/coredns/kubeconfig.conf coredns
pods insecure
ttl 60
}
prometheus :9153
forward . /etc/resolv.conf {
policy sequential
expire 30s
}
cache 300
loop
reload
loadbalance
}
kubeconfig.conf: |-
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: https://api.cluster.local:6443
name: default
contexts:
- context:
cluster: default
namespace: kube-system
user: coredns
name: coredns
current-context: coredns
users:
- name: coredns
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: coredns-local
namespace: kube-system
labels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
spec:
updateStrategy:
type: RollingUpdate
minReadySeconds: 15
selector:
matchLabels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
template:
metadata:
labels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9153"
spec:
priorityClassName: system-node-critical
serviceAccount: coredns
serviceAccountName: coredns
enableServiceLinks: false
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
hostNetwork: true
containers:
- name: coredns
image: coredns/coredns:1.9.4
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 50m
memory: 64Mi
args: [ "-conf", "/etc/coredns/Corefile.local" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 8091
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns-local

View File

@@ -1,483 +0,0 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: "true"
client-body-timeout: "30"
client-header-timeout: "30"
enable-access-log-for-default-backend: "true"
error-log-level: "error"
hsts: "true"
hsts-include-subdomains: "true"
hsts-max-age: "31536000"
hsts-preload: "true"
http-redirect-code: "301"
limit-req-status-code: "429"
log-format-escape-json: "true"
log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
proxy-connect-timeout: "10"
proxy-headers-hash-bucket-size: "128"
proxy-hide-headers: "strict-transport-security"
proxy-read-timeout: "60"
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
proxy-send-timeout: "60"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
server-tokens: "false"
ssl-protocols: "TLSv1.3"
upstream-keepalive-connections: "32"
use-forwarded-headers: "true"
use-geoip: "false"
use-geoip2: "false"
use-gzip: "true"
worker-cpu-affinity: "auto"
worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
# TODO(Jintao Zhang)
# Once we release a new version of the controller,
# we will be able to remove the configmap related permissions
# We have used the Lease API for leader election
# ref: https://github.com/kubernetes/ingress-nginx/pull/8921
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: ClusterIP
clusterIP: None
ipFamilyPolicy: RequireDualStack
ipFamilies:
- IPv4
- IPv6
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 2
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 15
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding are required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.4.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.5.1"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx

View File

@@ -1,116 +0,0 @@
controller:
kind: DaemonSet
hostNetwork: true
hostPort:
enabled: false
ports:
http: 80
https: 443
dnsPolicy: ClusterFirstWithHostNet
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
publishService:
enabled: false
config:
worker-processes: "auto"
worker-cpu-affinity: "auto"
error-log-level: "error"
server-tokens: "false"
http-redirect-code: "301"
use-gzip: "true"
use-geoip: "false"
use-geoip2: "false"
use-forwarded-headers: "true"
# curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
enable-access-log-for-default-backend: "true"
log-format-escape-json: "true"
log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'
upstream-keepalive-connections: "32"
proxy-connect-timeout: "10"
proxy-read-timeout: "60"
proxy-send-timeout: "60"
ssl-protocols: "TLSv1.3"
hsts: "true"
hsts-max-age: "31536000"
hsts-include-subdomains: "true"
hsts-preload: "true"
proxy-hide-headers: "strict-transport-security"
proxy-headers-hash-bucket-size: "128"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
limit-req-status-code: "429"
client-header-timeout: "30"
client-body-timeout: "30"
minReadySeconds: 15
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "10254"
extraEnvs:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 30
readinessProbe:
periodSeconds: 30
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
service:
enabled: true
type: ClusterIP
clusterIP: None
ipFamilyPolicy: "RequireDualStack"
ipFamilies:
- IPv4
- IPv6
admissionWebhooks:
enabled: false
metrics:
enabled: false
revisionHistoryLimit: 2
defaultBackend:
enabled: false

View File

@@ -1,140 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
rules:
- apiGroups: [ "" ]
resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "endpoints", "persistentvolumes", "pods" ]
verbs: [ "*" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-storage
spec:
replicas: 1
selector:
matchLabels:
app: local-path-provisioner
template:
metadata:
labels:
app: local-path-provisioner
spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
serviceAccountName: local-path-provisioner-service-account
containers:
- name: local-path-provisioner
image: rancher/local-path-provisioner:v0.0.23
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-volume
configMap:
name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
name: local-path-config
namespace: local-path-storage
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/var/data"]
}
]
}
setup: |-
#!/bin/sh
set -eu
mkdir -m 0777 -p "$VOL_DIR"
teardown: |-
#!/bin/sh
set -eu
rm -rf "$VOL_DIR"
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
priorityClassName: system-node-critical
tolerations:
- key: node.kubernetes.io/disk-pressure
operator: Exists
effect: NoSchedule
containers:
- name: helper-pod
image: busybox
imagePullPolicy: IfNotPresent

View File

@@ -1,197 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
containers:
- args:
- --cert-dir=/tmp
- --secure-port=6443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --authorization-always-allow-paths=/metrics
image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 6443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100

hetzner/images/Makefile Normal file
View File

@@ -0,0 +1,6 @@
init:
	packer init .
release:
	packer build -only=release.hcloud.talos .

View File

@@ -0,0 +1,41 @@
packer {
required_plugins {
hcloud = {
version = ">= 1.0.5"
source = "github.com/hashicorp/hcloud"
}
}
}
source "hcloud" "talos" {
token = var.hcloud_token
rescue = "linux64"
image = "debian-11"
location = var.hcloud_location
server_type = var.hcloud_type
ssh_username = "root"
snapshot_name = "talos system disk"
snapshot_labels = {
type = "infra",
os = "talos",
arch = substr(var.hcloud_type, 0, 2) == "ca" ? "arm64" : "amd64",
version = "${var.talos_version}",
}
}
build {
name = "release"
sources = ["source.hcloud.talos"]
provisioner "shell" {
inline = [
"apt-get install -y wget",
"export ARCH=${substr(var.hcloud_type, 0, 2) == "ca" ? "arm64" : "amd64"}",
"wget -O /tmp/talos.raw.xz ${local.image}",
"sync && xz -d -c /tmp/talos.raw.xz | dd of=/dev/sda && sync",
"mount /dev/sda3 /mnt && sed -i 's/set timeout=3/set timeout=10/g' /mnt/grub/grub.cfg && umount /mnt"
]
}
}
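Since both the arch label and the $ARCH export derive from the hcloud_type prefix, building the arm64 snapshot only requires a different builder type. A hypothetical variable file for that, passed with `packer build -var-file=arm.pkrvars.hcl -only=release.hcloud.talos .`:

```hcl
# arm.pkrvars.hcl (hypothetical): build the snapshot on an Ampere ARM instance.
hcloud_type   = "cax11"
talos_version = "v1.4.0-beta.1"
```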

View File

@@ -0,0 +1,25 @@
variable "hcloud_token" {
type = string
default = env("HCLOUD_TOKEN")
sensitive = true
}
variable "hcloud_location" {
type = string
default = "fsn1"
}
variable "hcloud_type" {
type = string
default = "cx11" # cx11|cax11 (arm)
}
variable "talos_version" {
type = string
default = "v1.4.0-beta.1"
}
locals {
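  # Assumption: "$ARCH" is intentionally left literal here; the shell provisioner
  # above exports ARCH at build time, so the remote shell expands it in the wget URL.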
image = "https://github.com/talos-systems/talos/releases/download/${var.talos_version}/hcloud-$ARCH.raw.xz"
}

View File

@@ -0,0 +1,108 @@
locals {
controlplane_labels = ""
controlplanes = { for k in flatten([
for regions in var.regions : [
for inx in range(lookup(try(var.controlplane[regions], {}), "count", 0)) : {
name : "controlplane-${regions}-${1 + inx}"
image : data.hcloud_image.talos[startswith(lookup(try(var.controlplane[regions], {}), "type", "cpx11"), "ca") ? "arm64" : "amd64"].id
region : regions
type : lookup(try(var.controlplane[regions], {}), "type", "cpx11")
ip : cidrhost(cidrsubnet(hcloud_network_subnet.core.ip_range, 6, 2 + index(var.regions, regions)), inx)
}
]
]) : k.name => k }
}
output "instances" {
value = local.controlplanes
}
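With the default controlplane scheme from variables.tf (a single cpx11 in fsn1), the comprehension above produces a one-entry map roughly like the following; ids and addresses depend on the snapshot and on var.regions ordering, so they are illustrative only:

```hcl
# Illustrative value of local.controlplanes (assumed, not authoritative):
# {
#   "controlplane-fsn1-1" = {
#     name   = "controlplane-fsn1-1"
#     region = "fsn1"
#     type   = "cpx11"
#     image  = 123456789 # amd64 snapshot id from data.hcloud_image.talos
#     ip     = cidrhost(cidrsubnet(hcloud_network_subnet.core.ip_range, 6, 2 + index(var.regions, "fsn1")), 0)
#   }
# }
```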
resource "hcloud_server" "controlplane" {
for_each = local.controlplanes
location = each.value.region
name = each.value.name
image = each.value.image
server_type = each.value.type
ssh_keys = [hcloud_ssh_key.infra.id]
keep_disk = true
labels = merge(var.tags, { type = "infra", label = "controlplane" })
firewall_ids = [hcloud_firewall.controlplane.id]
public_net {
ipv4_enabled = true
ipv6_enabled = true
}
network {
network_id = hcloud_network.main.id
ip = each.value.ip
}
# user_data = templatefile("${path.module}/templates/controlplane.yaml",
# merge(var.kubernetes, {
# name = each.value.name
# ipv4_vip = local.ipv4_vip
# ipv4_local = each.value.ip
# lbv4_local = local.lbv4_local
# lbv4 = local.lbv4
# lbv6 = local.lbv6
# hcloud_network = hcloud_network.main.id
# hcloud_token = var.hcloud_token
# hcloud_image = data.hcloud_image.talos["amd64"].id
# robot_user = var.robot_user
# robot_password = var.robot_password
# })
# )
lifecycle {
ignore_changes = [
network,
image,
server_type,
user_data,
ssh_keys,
]
}
}
resource "hcloud_load_balancer_target" "api" {
count = local.lb_enable ? lookup(var.controlplane, "count", 0) : 0
type = "server"
load_balancer_id = hcloud_load_balancer.api[0].id
server_id = hcloud_server.controlplane[count.index].id
}
#
# Securely push the Talos config to the controlplane nodes
#
resource "local_file" "controlplane" {
for_each = local.controlplanes
content = templatefile("${path.module}/templates/controlplane.yaml.tpl",
merge(var.kubernetes, {
name = each.value.name
ipv4_vip = local.ipv4_vip
ipv4_local = each.value.ip
lbv4_local = local.lbv4_local
lbv4 = local.lbv4
lbv6 = local.lbv6
hcloud_network = hcloud_network.main.id
hcloud_token = var.hcloud_token
hcloud_image = data.hcloud_image.talos["amd64"].id
robot_user = var.robot_user
robot_password = var.robot_password
})
)
filename = "_cfgs/${each.value.name}.yaml"
file_permission = "0600"
}
resource "null_resource" "controlplane" {
for_each = local.controlplanes
provisioner "local-exec" {
command = "sleep 60 && talosctl apply-config --insecure --nodes ${hcloud_server.controlplane[each.key].ipv4_address} --config-patch @_cfgs/${each.value.name}.yaml --file _cfgs/controlplane.yaml"
}
depends_on = [hcloud_load_balancer_target.api, local_file.controlplane]
}

View File

@@ -1,88 +0,0 @@
resource "hcloud_server" "controlplane" {
count = lookup(var.controlplane, "count", 0)
location = element(var.regions, count.index)
name = "controlplane-${count.index + 1}"
image = data.hcloud_image.talos.id
server_type = lookup(var.controlplane, "type", "cpx11")
ssh_keys = [hcloud_ssh_key.infra.id]
keep_disk = true
labels = merge(var.tags, { type = "infra", label = "controlplane" })
firewall_ids = [hcloud_firewall.controlplane.id]
public_net {
ipv4_enabled = true
ipv6_enabled = true
}
network {
network_id = hcloud_network.main.id
ip = cidrhost(hcloud_network_subnet.core.ip_range, 11 + count.index)
}
user_data = templatefile("${path.module}/templates/controlplane.yaml",
merge(var.kubernetes, {
name = "controlplane-${count.index + 1}"
ipv4_vip = local.ipv4_vip
ipv4_local = cidrhost(hcloud_network_subnet.core.ip_range, 11 + count.index)
lbv4_local = local.lbv4_local
lbv4 = local.lbv4
lbv6 = local.lbv6
hcloud_network = hcloud_network.main.id
hcloud_token = var.hcloud_token
hcloud_image = data.hcloud_image.talos.id
robot_user = var.robot_user
robot_password = var.robot_password
})
)
lifecycle {
ignore_changes = [
network,
image,
server_type,
user_data,
ssh_keys,
]
}
}
resource "hcloud_load_balancer_target" "api" {
count = local.lb_enable ? lookup(var.controlplane, "count", 0) : 0
type = "server"
load_balancer_id = hcloud_load_balancer.api[0].id
server_id = hcloud_server.controlplane[count.index].id
}
#
# Securely push the Talos config to the controlplane nodes
#
# resource "local_file" "controlplane" {
# count = lookup(var.controlplane, "count", 0)
# content = templatefile("${path.module}/templates/controlplane.yaml",
# merge(var.kubernetes, {
# name = "master-${count.index + 1}"
# type = "controlplane"
# ipv4_vip = local.ipv4_vip
# ipv4_local = cidrhost(hcloud_network_subnet.core.ip_range, 11 + count.index)
# lbv4_local = local.lbv4_local
# lbv4 = local.lbv4
# lbv6 = local.lbv6
# hcloud_network = hcloud_network.main.id
# hcloud_token = var.hcloud_token
# labels = "topology.kubernetes.io/region=${hcloud_server.controlplane[count.index].location},topology.kubernetes.io/zone=${hcloud_server.controlplane[count.index].datacenter}"
# })
# )
# filename = "_cfgs/controlplane-${count.index + 1}.yaml"
# file_permission = "0600"
# depends_on = [hcloud_server.controlplane]
# }
# resource "null_resource" "controlplane" {
# count = lookup(var.controlplane, "count", 0)
# provisioner "local-exec" {
# command = "sleep 60 && talosctl apply-config --insecure --nodes ${hcloud_server.controlplane[count.index].ipv4_address} --file _cfgs/controlplane-${count.index + 1}.yaml"
# }
# depends_on = [hcloud_load_balancer_target.api, local_file.controlplane]
# }
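Since the commit replaces this count-based resource with the for_each version above, any pre-existing servers change their state addresses from numeric indexes to map keys. One way to express that migration is a moved block (Terraform >= 1.1); the key below is hypothetical and must match the generated name:

```hcl
# Sketch: remap state so the existing server is not destroyed and recreated.
moved {
  from = hcloud_server.controlplane[0]
  to   = hcloud_server.controlplane["controlplane-fsn1-1"]
}
```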

View File

@@ -1,13 +1,13 @@
 locals {
-  lb_enable = lookup(var.controlplane, "type_lb", "") == "" ? false : true
+  lb_enable = try(var.controlplane["all"].type_lb, "") == "" ? false : true
 }
 locals {
-  ipv4_vip   = cidrhost(hcloud_network_subnet.core.ip_range, 10)
+  ipv4_vip   = cidrhost(hcloud_network_subnet.core.ip_range, 6)
   lbv4_local = cidrhost(hcloud_network_subnet.core.ip_range, 5)
   lbv4       = local.lb_enable ? hcloud_load_balancer.api[0].ipv4 : hcloud_floating_ip.api[0].ip_address
-  lbv6       = local.lb_enable ? hcloud_load_balancer.api[0].ipv6 : cidrhost(hcloud_network_subnet.core.ip_range, 10)
+  lbv6       = local.lb_enable ? hcloud_load_balancer.api[0].ipv6 : local.ipv4_vip
 }
 resource "hcloud_floating_ip" "api" {
@@ -22,7 +22,7 @@ resource "hcloud_load_balancer" "api" {
   count              = local.lb_enable ? 1 : 0
   name               = "api"
   location           = var.regions[0]
-  load_balancer_type = lookup(var.controlplane, "type_lb", "lb11")
+  load_balancer_type = try(var.controlplane["all"].type_lb, "lb11")
   labels             = merge(var.tags, { type = "infra" })
 }
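The try() wrapper is what makes the nested variable shape safe: if the "all" key or its type_lb attribute is missing, try() returns the fallback instead of failing the plan. Spelled out as a sketch (not part of the module):

```hcl
locals {
  # "" (missing key or empty value) selects the floating-ip path; anything
  # else, e.g. "lb11", enables the load balancer.
  type_lb_example   = try(var.controlplane["all"].type_lb, "")
  lb_enable_example = local.type_lb_example != ""
}
```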

View File

@@ -8,14 +8,14 @@ resource "hcloud_network" "main" {
resource "hcloud_network_subnet" "core" {
network_id = hcloud_network.main.id
type = "cloud"
network_zone = "eu-central"
network_zone = var.vpc_main_zone
ip_range = cidrsubnet(hcloud_network.main.ip_range, 8, 0)
}
resource "hcloud_network_subnet" "private" {
network_id = hcloud_network.main.id
type = "cloud"
network_zone = "eu-central"
network_zone = var.vpc_main_zone
ip_range = cidrsubnet(hcloud_network.main.ip_range, 8, 1)
}
@@ -23,7 +23,7 @@ resource "hcloud_network_subnet" "robot" {
   count        = var.vpc_vswitch_id == 0 ? 0 : 1
   network_id   = hcloud_network.main.id
   type         = "vswitch"
-  network_zone = "eu-central"
+  network_zone = var.vpc_main_zone
   vswitch_id   = var.vpc_vswitch_id
   ip_range     = cidrsubnet(hcloud_network.main.ip_range, 8, 2)
 }
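With the default vpc_main_cidr of 172.16.0.0/16 (variables.tf below), the three cidrsubnet(..., 8, n) calls carve consecutive /24s. An illustrative check of the layout:

```hcl
# Illustrative subnet layout for the default "172.16.0.0/16" main range:
locals {
  core_example    = cidrsubnet("172.16.0.0/16", 8, 0) # "172.16.0.0/24"
  private_example = cidrsubnet("172.16.0.0/16", 8, 1) # "172.16.1.0/24"
  robot_example   = cidrsubnet("172.16.0.0/16", 8, 2) # "172.16.2.0/24"
}
```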

View File

@@ -1,8 +1,4 @@
version: v1alpha1
debug: false
persist: true
machine:
type: controlplane
certSANs:
- "${lbv4}"
- "${lbv6}"
@@ -10,16 +6,8 @@ machine:
- "${ipv4_local}"
- "${ipv4_vip}"
- "${apiDomain}"
features:
kubernetesTalosAPIAccess:
enabled: true
allowedRoles:
- os:reader
allowedKubernetesNamespaces:
- kube-system
kubelet:
extraArgs:
node-ip: "${ipv4_local}"
rotate-server-certificates: true
clusterDNS:
- 169.254.2.53
@@ -44,40 +32,42 @@ machine:
- interface: dummy0
addresses:
- 169.254.2.53/32
kubespan:
enabled: false
allowDownPeerBypass: true
extraHostEntries:
- ip: ${ipv4_vip}
- ip: 127.0.0.1
aliases:
- ${apiDomain}
install:
wipe: false
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
systemDiskEncryption:
state:
provider: luks2
options:
- no_read_workqueue
- no_write_workqueue
keys:
- nodeID: {}
slot: 0
ephemeral:
provider: luks2
keys:
- nodeID: {}
slot: 0
options:
- no_read_workqueue
- no_write_workqueue
keys:
- nodeID: {}
slot: 0
features:
kubernetesTalosAPIAccess:
enabled: true
allowedRoles:
- os:reader
allowedKubernetesNamespaces:
- kube-system
cluster:
id: ${clusterID}
secret: ${clusterSecret}
adminKubeconfig:
certLifetime: 8h0m0s
controlPlane:
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
discovery:
enabled: true
network:
dnsDomain: ${domain}
podSubnets: ${format("%#v",split(",",podSubnets))}
@@ -85,10 +75,9 @@ cluster:
cni:
name: custom
urls:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/cilium-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/cilium-result.yaml
proxy:
disabled: true
mode: ipvs
apiServer:
certSANs:
- "${lbv4}"
@@ -97,32 +86,10 @@ cluster:
- "${ipv4_local}"
- "${ipv4_vip}"
- "${apiDomain}"
admissionControl:
- name: PodSecurity
configuration:
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
defaults:
audit: restricted
audit-version: latest
enforce: baseline
enforce-version: latest
warn: restricted
warn-version: latest
exemptions:
namespaces:
- kube-system
- ingress-nginx
- monitoring
- local-path-storage
- local-lvm
runtimeClasses: []
usernames: []
kind: PodSecurityConfiguration
controllerManager:
extraArgs:
node-cidr-mask-size-ipv4: 24
node-cidr-mask-size-ipv6: 112
scheduler: {}
etcd:
advertisedSubnets:
- ${nodeSubnets}
@@ -150,8 +117,8 @@ cluster:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/hcloud-cloud-controller-manager.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/hcloud-csi.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/kubelet-serving-cert-approver.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/metrics-server.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/local-path-storage.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/ingress-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/ingress-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/metrics-server.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-result.yaml
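A small template detail worth noting: podSubnets arrives as a comma-separated string, and ${format("%#v", split(",", podSubnets))} JSON-encodes the split result, which YAML parses as a flow-style list. Illustratively, with a hypothetical value:

```hcl
# format("%#v", ...) is JSON serialization, so
#   podSubnets = "10.32.0.0/12,fd40:10:32::/102"   (hypothetical)
# renders in the machine config as:
#   podSubnets: ["10.32.0.0/12","fd40:10:32::/102"]
locals {
  pod_subnets_rendered = format("%#v", split(",", "10.32.0.0/12,fd40:10:32::/102"))
}
```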

View File

@@ -47,6 +47,12 @@ variable "vpc_main_cidr" {
   default     = "172.16.0.0/16"
 }
+
+variable "vpc_main_zone" {
+  description = "Network zone"
+  type        = string
+  default     = "eu-central"
+}
 variable "vpc_vswitch_id" {
   description = "vSwitch id"
   type        = number
@@ -54,12 +60,25 @@ variable "vpc_vswitch_id" {
 }
 variable "controlplane" {
-  description = "Property of controlplane"
+  description = "Controlplane scheme"
   type        = map(any)
   default = {
-    count   = 0,
-    type    = "cpx11"
-    type_lb = "" # lb11, if "" use floating-ip
+    "all" = {
+      type_lb = "" # lb11, if "" use floating-ip
+    },
+    "nbg1" = {
+      count = 0,
+      type  = "cpx11",
+    },
+    "fsn1" = {
+      count = 1,
+      type  = "cpx11",
+    },
+    "hel1" = {
+      count = 0,
+      type  = "cax11",
+    }
   }
 }
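The per-region scheme means each region key carries its own count and server type, while the shared "all" entry holds cluster-wide settings. A hypothetical terraform.tfvars spreading three control planes across regions, with an ARM node in hel1:

```hcl
# terraform.tfvars (hypothetical)
controlplane = {
  "all"  = { type_lb = "lb11" } # "" would fall back to a floating IP
  "nbg1" = { count = 1, type = "cpx11" }
  "fsn1" = { count = 1, type = "cpx11" }
  "hel1" = { count = 1, type = "cax11" }
}
```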
@@ -121,32 +140,3 @@ variable "whitelist_web" {
"104.24.0.0/14",
]
}
# variable "robot_servers" {
# type = list(object({
# name = string
# ipv4_address = string
# ipv6_address = string
# zone = string
# location = string
# params = string
# }))
# default = []
# }
# variable "hosts" {
# type = list(object({
# name = string
# ipv4_address = string
# ipv6_address = string
# }))
# default = [
# {
# name = "api.local"
# ipv4_address = "123"
# ipv6_address = ""
# },
# ]
# }

View File

@@ -2,7 +2,7 @@ terraform {
   required_providers {
     hcloud = {
       source  = "hetznercloud/hcloud"
-      version = "~> 1.36.1"
+      version = "~> 1.38.2"
     }
   }
   required_version = ">= 1.2"