Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-10-29 17:42:47 +00:00)

Commit: add deployments
@@ -10,7 +10,6 @@ create-config: ## Generate talos configs
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}

create-templates:
	@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
	@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
	@echo 'nodeSubnets: "172.16.0.0/12"' >> _cfgs/tfstate.vars
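The yq recipe deep-merges the generated Talos controlplane config into the chart template (the doubled `$$` is Makefile escaping for a literal `$`). A minimal sketch of the same merge on throwaway files, using yq v4 syntax directly in a shell; file names here are illustrative:

printf 'a: 1\nb:\n  c: 2\n' > base.yaml
printf 'b:\n  d: 3\n' > overlay.yaml
yq ea -P '. as $item ireduce ({}; . * $item)' base.yaml overlay.yaml
# a: 1
# b:
#   c: 2
#   d: 3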
@@ -28,12 +27,18 @@ create-templates:

create-controlplane-bootstrap:
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 bootstrap
	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.48 bootstrap

create-controlplane: ## Bootstrap first controlplane node
	terraform apply -auto-approve -target=hcloud_server.controlplane

create-kubeconfig: ## Prepare kubeconfig
	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig .
	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.48 kubeconfig .
	kubectl --kubeconfig=kubeconfig config set clusters.${CLUSTERNAME}.server https://${ENDPOINT}:6443
	kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system

create-deployments:
	helm template --namespace=kube-system --version=1.12.8 -f deployments/cilium.yaml cilium \
		cilium/cilium > deployments/cilium-result.yaml
	helm template --namespace=ingress-nginx --version=4.6.0 -f deployments/ingress.yaml ingress-nginx \
		ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
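The create-deployments target assumes both chart repositories are already registered locally; a setup sketch using the upstream repository URLs:

helm repo add cilium https://helm.cilium.io
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update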
908 proxmox/deployments/cilium-result.yaml Normal file
@@ -0,0 +1,908 @@
---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:

  # Identity allocation mode selects how identities are shared between cilium
  # nodes by setting how they are stored. The options are "crd" or "kvstore".
  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
  #   These can be queried with:
  #     kubectl get ciliumid
  # - "kvstore" stores identities in an etcd kvstore, that is
  #   configured below. Cilium versions before 1.6 supported only the kvstore
  #   backend. Upgrades from these older cilium versions should continue using
  #   the kvstore by commenting out the identity-allocation-mode below, or
  #   setting it to "kvstore".
  identity-allocation-mode: crd
  cilium-endpoint-gc-interval: "5m0s"
  nodes-gc-interval: "5m0s"
  skip-cnp-status-startup-clean: "false"
  # Disable the usage of CiliumEndpoint CRD
  disable-endpoint-crd: "false"

  # If you want to run cilium in debug mode change this value to true
  debug: "false"
  # The agent can be put into the following three policy enforcement modes
  # default, always and never.
  # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
  enable-policy: "default"
  # If you want metrics enabled in all of your Cilium agents, set the port for
  # which the Cilium agents will have their metrics exposed.
  # This option deprecates the "prometheus-serve-addr" in the
  # "cilium-metrics-config" ConfigMap
  # NOTE that this will open the port on ALL nodes where Cilium pods are
  # scheduled.
  prometheus-serve-addr: ":9962"
  # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
  # field is not set.
  proxy-prometheus-port: "9964"

  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
  # address.
  enable-ipv4: "true"

  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
  # address.
  enable-ipv6: "true"
  # Users who wish to specify their own custom CNI configuration file must set
  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
  custom-cni-conf: "false"
  enable-bpf-clock-probe: "true"
  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the less packets
  # that will be seen in monitor output.
  monitor-aggregation: medium

  # The monitor aggregation interval governs the typical time between monitor
  # notification events for each allowed connection.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-interval: 5s

  # The monitor aggregation flags determine which TCP flags which, upon the
  # first observation, cause monitor notifications to be generated.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-flags: all
  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
  bpf-map-dynamic-size-ratio: "0.0025"
  # bpf-policy-map-max specifies the maximum number of entries in endpoint
  # policy map (per endpoint)
  bpf-policy-map-max: "16384"
  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
  # backend and affinity maps.
  bpf-lb-map-max: "65536"
  # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass
  # optimization for nodeport reverse NAT handling.
  bpf-lb-external-clusterip: "false"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # As a result, reply packets may be dropped and the load-balancing decisions
  # for established connections may change.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "false"

  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"

  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: default
  # Unique ID of the cluster. Must be unique across all connected clusters and
  # in the range of 1 and 255. Only relevant when building a mesh of clusters.
  cluster-id: "0"

  # Encapsulation mode for communication between nodes
  # Possible values:
  #   - disabled
  #   - vxlan (default)
  #   - geneve
  tunnel: "vxlan"
  # Enables L7 proxy for L7 policy enforcement and visibility
  enable-l7-proxy: "true"

  enable-ipv4-masquerade: "true"
  enable-ipv6-masquerade: "true"
  enable-bpf-masquerade: "false"

  enable-xt-socket-fallback: "true"
  install-iptables-rules: "true"
  install-no-conntrack-iptables-rules: "false"

  auto-direct-node-routes: "false"
  enable-local-redirect-policy: "true"
  enable-host-firewall: "true"
  # List of devices used to attach bpf_host.o (implements BPF NodePort,
  # host-firewall and BPF masquerading)
  devices: "eth+"

  kube-proxy-replacement: "strict"
  kube-proxy-replacement-healthz-bind-address: ""
  bpf-lb-sock: "false"
  host-reachable-services-protos:
  enable-health-check-nodeport: "true"
  node-port-bind-protection: "true"
  enable-auto-protect-node-port-range: "true"
  enable-svc-source-range-check: "true"
  enable-l2-neigh-discovery: "true"
  arping-refresh-period: "30s"
  k8s-require-ipv4-pod-cidr: "true"
  k8s-require-ipv6-pod-cidr: "true"
  cni-uninstall: "true"
  enable-endpoint-health-checking: "true"
  enable-health-checking: "true"
  enable-well-known-identities: "false"
  enable-remote-node-identity: "true"
  synchronize-k8s-nodes: "true"
  operator-api-serve-addr: "127.0.0.1:9234"
  ipam: "kubernetes"
  disable-cnp-status-updates: "true"
  enable-vtep: "false"
  vtep-endpoint: ""
  vtep-cidr: ""
  vtep-mask: ""
  vtep-mac: ""
  enable-k8s-endpoint-slice: "true"
  enable-bgp-control-plane: "false"
  bpf-root: "/sys/fs/bpf"
  cgroup-root: "/sys/fs/cgroup"
  enable-k8s-terminating-endpoint: "true"
  remove-cilium-node-taints: "true"
  set-cilium-is-up-condition: "true"
  unmanaged-pod-watcher-interval: "15"
  tofqdns-dns-reject-response-code: "refused"
  tofqdns-enable-dns-compression: "true"
  tofqdns-endpoint-max-ip-per-hostname: "50"
  tofqdns-idle-connection-grace-period: "0s"
  tofqdns-max-deferred-connection-deletes: "10000"
  tofqdns-min-ttl: "3600"
  tofqdns-proxy-response-max-delay: "100ms"
  agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium
rules:
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  - services
  - pods
  - endpoints
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - list
  - watch
  # This is used when validating policies in preflight. This will need to stay
  # until we figure out how to avoid "get" inside the preflight, and then
  # should be removed ideally.
  - get
- apiGroups:
  - cilium.io
  resources:
  - ciliumbgploadbalancerippools
  - ciliumbgppeeringpolicies
  - ciliumclusterwideenvoyconfigs
  - ciliumclusterwidenetworkpolicies
  - ciliumegressgatewaypolicies
  - ciliumegressnatpolicies
  - ciliumendpoints
  - ciliumendpointslices
  - ciliumenvoyconfigs
  - ciliumidentities
  - ciliumlocalredirectpolicies
  - ciliumnetworkpolicies
  - ciliumnodes
  verbs:
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumidentities
  - ciliumendpoints
  - ciliumnodes
  verbs:
  - create
- apiGroups:
  - cilium.io
  # To synchronize garbage collection of such resources
  resources:
  - ciliumidentities
  verbs:
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpoints
  verbs:
  - delete
  - get
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes
  - ciliumnodes/status
  verbs:
  - get
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies/status
  - ciliumclusterwidenetworkpolicies/status
  - ciliumendpoints/status
  - ciliumendpoints
  verbs:
  - patch
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  # to automatically delete [core|kube]dns pods so that are starting to being
  # managed by Cilium
  - delete
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # To remove node taints
  - nodes
  # To set NetworkUnavailable false on startup
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform LB IP allocation for BGP
  - services/status
  verbs:
  - update
- apiGroups:
  - ""
  resources:
  # to check apiserver connectivity
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
  - services
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumclusterwidenetworkpolicies
  verbs:
  # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
  - create
  - update
  - deletecollection
  # To update the status of the CNPs and CCNPs
  - patch
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies/status
  - ciliumclusterwidenetworkpolicies/status
  verbs:
  # Update the auto-generated CNPs and CCNPs status.
  - patch
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpoints
  - ciliumidentities
  verbs:
  # To perform garbage collection of such resources
  - delete
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumidentities
  verbs:
  # To synchronize garbage collection of such resources
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes
  verbs:
  - create
  - update
  - get
  - list
  - watch
  # To perform CiliumNode garbage collector
  - delete
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes/status
  verbs:
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpointslices
  - ciliumenvoyconfigs
  verbs:
  - create
  - update
  - get
  - list
  - watch
  - delete
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - update
  resourceNames:
  - ciliumbgploadbalancerippools.cilium.io
  - ciliumbgppeeringpolicies.cilium.io
  - ciliumclusterwideenvoyconfigs.cilium.io
  - ciliumclusterwidenetworkpolicies.cilium.io
  - ciliumegressgatewaypolicies.cilium.io
  - ciliumegressnatpolicies.cilium.io
  - ciliumendpoints.cilium.io
  - ciliumendpointslices.cilium.io
  - ciliumenvoyconfigs.cilium.io
  - ciliumexternalworkloads.cilium.io
  - ciliumidentities.cilium.io
  - ciliumlocalredirectpolicies.cilium.io
  - ciliumnetworkpolicies.cilium.io
  - ciliumnodes.cilium.io
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - get
  - update
---
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium
subjects:
- kind: ServiceAccount
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-operator
subjects:
- kind: ServiceAccount
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-agent/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: cilium-agent
  namespace: kube-system
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9964"
  labels:
    k8s-app: cilium
spec:
  clusterIP: None
  type: ClusterIP
  selector:
    k8s-app: cilium
  ports:
  - name: envoy-metrics
    port: 9964
    protocol: TCP
    targetPort: envoy-metrics
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium
  namespace: kube-system
  labels:
    k8s-app: cilium
spec:
  selector:
    matchLabels:
      k8s-app: cilium
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
  template:
    metadata:
      annotations:
        prometheus.io/port: "9962"
        prometheus.io/scrape: "true"
      labels:
        k8s-app: cilium
    spec:
      containers:
      - name: cilium-agent
        image: "quay.io/cilium/cilium:v1.12.8@sha256:b6c3c48b380334b8f08dba6e0c28d906c0d722b8c2beb0d506b3cea27f66f78d"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-agent
        args:
        - --config-dir=/tmp/cilium/config-map
        startupProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 105
          periodSeconds: 2
          successThreshold: 1
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 10
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 3
          timeoutSeconds: 5
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        - name: CILIUM_CNI_CHAINING_MODE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: cni-chaining-mode
              optional: true
        - name: CILIUM_CUSTOM_CNI_CONF
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: custom-cni-conf
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        lifecycle:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
              - "--enable-debug=false"
              - "--cni-exclusive=true"
              - "--log-file=/var/run/cilium/cilium-cni.log"
          preStop:
            exec:
              command:
              - /cni-uninstall.sh
        resources:
          limits:
            cpu: 2
            memory: 2Gi
          requests:
            cpu: 100m
            memory: 128Mi
        ports:
        - name: peer-service
          containerPort: 4244
          hostPort: 4244
          protocol: TCP
        - name: prometheus
          containerPort: 9962
          hostPort: 9962
          protocol: TCP
        - name: envoy-metrics
          containerPort: 9964
          hostPort: 9964
          protocol: TCP
        securityContext:
          privileged: true
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
          mountPropagation: Bidirectional
        # Check for duplicate mounts before mounting
        - name: cilium-cgroup
          mountPath: /sys/fs/cgroup
        - name: cilium-run
          mountPath: /var/run/cilium
        - name: etc-cni-netd
          mountPath: /host/etc/cni/net.d
        - name: clustermesh-secrets
          mountPath: /var/lib/cilium/clustermesh
          readOnly: true
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        # Needed to be able to load kernel modules
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: xtables-lock
          mountPath: /run/xtables.lock
      initContainers:
      - name: clean-cilium-state
        image: "quay.io/cilium/cilium:v1.12.8@sha256:b6c3c48b380334b8f08dba6e0c28d906c0d722b8c2beb0d506b3cea27f66f78d"
        imagePullPolicy: IfNotPresent
        command:
        - /init-container.sh
        env:
        - name: CILIUM_ALL_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-state
              optional: true
        - name: CILIUM_BPF_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-bpf-state
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          privileged: true
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
        # Required to mount cgroup filesystem from the host to cilium agent pod
        - name: cilium-cgroup
          mountPath: /sys/fs/cgroup
          mountPropagation: HostToContainer
        - name: cilium-run
          mountPath: /var/run/cilium
        resources:
          requests:
            cpu: 100m
            memory: 100Mi # wait-for-kube-proxy
      # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
      - name: install-cni-binaries
        image: "quay.io/cilium/cilium:v1.12.8@sha256:b6c3c48b380334b8f08dba6e0c28d906c0d722b8c2beb0d506b3cea27f66f78d"
        imagePullPolicy: IfNotPresent
        command:
        - "/install-plugin.sh"
        resources:
          requests:
            cpu: 100m
            memory: 10Mi
        securityContext:
          capabilities:
            drop:
            - ALL
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - name: cni-path
          mountPath: /host/opt/cni/bin
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccount: "cilium"
      serviceAccountName: "cilium"
      automountServiceAccountToken: true
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                k8s-app: cilium
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
      - operator: Exists
      volumes:
      # To keep state between restarts / upgrades
      - name: cilium-run
        hostPath:
          path: /var/run/cilium
          type: DirectoryOrCreate
      # To keep state between restarts / upgrades for bpf maps
      - name: bpf-maps
        hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
      # To keep state between restarts / upgrades for cgroup2 filesystem
      - name: cilium-cgroup
        hostPath:
          path: /sys/fs/cgroup
          type: DirectoryOrCreate
      # To install cilium cni plugin in the host
      - name: cni-path
        hostPath:
          path: /opt/cni/bin
          type: DirectoryOrCreate
      # To install cilium cni configuration in the host
      - name: etc-cni-netd
        hostPath:
          path: /etc/cni/net.d
          type: DirectoryOrCreate
      # To be able to load kernel modules
      - name: lib-modules
        hostPath:
          path: /lib/modules
      # To access iptables concurrently with other processes (e.g. kube-proxy)
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        secret:
          secretName: cilium-clustermesh
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          optional: true
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: kube-system
  labels:
    io.cilium/app: operator
    name: cilium-operator
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
  replicas: 1
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        # ensure pods roll when configmap updates
        cilium.io/cilium-configmap-checksum: "8355b2f8d6da37eedd17661ef948fedfa489901c3e7274ed0a0c586a424b71cb"
      labels:
        io.cilium/app: operator
        name: cilium-operator
    spec:
      containers:
      - name: cilium-operator
        image: "quay.io/cilium/operator-generic:v1.12.8@sha256:7431f0c2001fb875b1a8901e103825394c38cd6c63a1435a3273ed20ae0e7578"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-operator-generic
        args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        volumeMounts:
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        terminationMessagePolicy: FallbackToLogsOnError
      hostNetwork: true
      restartPolicy: Always
      priorityClassName: system-cluster-critical
      serviceAccount: "cilium-operator"
      serviceAccountName: "cilium-operator"
      automountServiceAccountToken: true
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                io.cilium/app: operator
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
        node-role.kubernetes.io/control-plane: ""
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
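Once Talos pulls this manifest through the custom CNI URL (see the template change at the end of this commit), agent health can be checked with standard tooling; a hedged sketch using stock kubectl and the cilium agent CLI:

kubectl -n kube-system get pods -l k8s-app=cilium -o wide
kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium status --brief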
77 proxmox/deployments/cilium.yaml Normal file
@@ -0,0 +1,77 @@
---

k8sServiceHost: "api.cluster.local"
k8sServicePort: "6443"

operator:
  enabled: true
  rollOutPods: true
  replicas: 1
  prometheus:
    enabled: false
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
    - operator: Exists
      effect: NoSchedule

identityAllocationMode: crd
kubeProxyReplacement: strict
enableK8sEndpointSlice: true
localRedirectPolicy: true

tunnel: "vxlan"
autoDirectNodeRoutes: false
devices: [eth+]

healthChecking: true

cni:
  install: true

ipam:
  mode: "kubernetes"
k8s:
  requireIPv4PodCIDR: true
  requireIPv6PodCIDR: true

bpf:
  masquerade: false
ipv4:
  enabled: true
ipv6:
  enabled: true
hostServices:
  enabled: true
hostPort:
  enabled: true
nodePort:
  enabled: true
externalIPs:
  enabled: true
hostFirewall:
  enabled: true
ingressController:
  enabled: false

securityContext:
  privileged: true

hubble:
  enabled: false

prometheus:
  enabled: true

cgroup:
  autoMount:
    enabled: false
  hostRoot: /sys/fs/cgroup

resources:
  limits:
    cpu: 2
    memory: 2Gi
  requests:
    cpu: 100m
    memory: 128Mi
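These values render into the cilium-config ConfigMap shown above. A quick way to confirm a value survives the render (yq v4 syntax; the helm invocation mirrors the Makefile target):

helm template --namespace=kube-system --version=1.12.8 -f deployments/cilium.yaml cilium cilium/cilium \
  | yq 'select(.kind == "ConfigMap" and .metadata.name == "cilium-config") | .data."kube-proxy-replacement"'
# "strict"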
153 proxmox/deployments/coredns-local.yaml Normal file
@@ -0,0 +1,153 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns-local
  namespace: kube-system
data:
  empty.db: |
    @ 60 IN SOA localnet. root.localnet. (
            1     ; serial
            60    ; refresh
            60    ; retry
            60    ; expiry
            60 )  ; minimum
            ;
    @ IN NS localnet.

  hosts: |
    # static hosts
    169.254.2.53 dns.local

  Corefile.local: |
    (empty) {
        file /etc/coredns/empty.db
    }

    .:53 {
        errors
        bind 169.254.2.53

        health 127.0.0.1:8091 {
            lameduck 5s
        }

        hosts /etc/coredns/hosts {
            reload 60s
            fallthrough
        }

        kubernetes cluster.local in-addr.arpa ip6.arpa {
            endpoint https://api.cluster.local:6443
            kubeconfig /etc/coredns/kubeconfig.conf coredns
            pods insecure
            ttl 60
        }
        prometheus :9153

        forward . /etc/resolv.conf {
            policy sequential
            expire 30s
        }

        cache 300
        loop
        reload
        loadbalance
    }
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: https://api.cluster.local:6443
      name: default
    contexts:
    - context:
        cluster: default
        namespace: kube-system
        user: coredns
      name: coredns
    current-context: coredns
    users:
    - name: coredns
      user:
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: coredns-local
  namespace: kube-system
  labels:
    k8s-app: kube-dns-local
    kubernetes.io/name: CoreDNS
spec:
  updateStrategy:
    type: RollingUpdate
  minReadySeconds: 15
  selector:
    matchLabels:
      k8s-app: kube-dns-local
      kubernetes.io/name: CoreDNS
  template:
    metadata:
      labels:
        k8s-app: kube-dns-local
        kubernetes.io/name: CoreDNS
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9153"
    spec:
      priorityClassName: system-node-critical
      serviceAccount: coredns
      serviceAccountName: coredns
      enableServiceLinks: false
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
          operator: Exists
        - effect: NoSchedule
          key: node.cloudprovider.kubernetes.io/uninitialized
          operator: Exists
      hostNetwork: true
      containers:
        - name: coredns
          image: coredns/coredns:1.9.4
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 50m
              memory: 64Mi
          args: [ "-conf", "/etc/coredns/Corefile.local" ]
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
              readOnly: true
          livenessProbe:
            httpGet:
              host: 127.0.0.1
              path: /health
              port: 8091
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns-local
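The DaemonSet serves DNS on the link-local address 169.254.2.53 of every node (the dummy0 address configured in the Talos template at the end of this commit); a resolution smoke test from a node:

dig +short @169.254.2.53 dns.local
# 169.254.2.53
dig +short @169.254.2.53 kubernetes.default.svc.cluster.local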
11 proxmox/deployments/ingress-ns.yaml Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/audit: baseline
    pod-security.kubernetes.io/audit-version: latest
    pod-security.kubernetes.io/warn: baseline
    pod-security.kubernetes.io/warn-version: latest
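A quick check that the Pod Security Standards labels are active on the namespace (plain kubectl, nothing repo-specific):

kubectl get namespace ingress-nginx --show-labels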
467 proxmox/deployments/ingress-result.yaml Normal file
@@ -0,0 +1,467 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
  client-body-timeout: "30"
  client-header-timeout: "30"
  enable-access-log-for-default-backend: "true"
  error-log-level: "error"
  hsts: "true"
  hsts-include-subdomains: "true"
  hsts-max-age: "31536000"
  hsts-preload: "true"
  http-redirect-code: "301"
  limit-req-status-code: "429"
  log-format-escape-json: "true"
  log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
  proxy-connect-timeout: "10"
  proxy-headers-hash-bucket-size: "128"
  proxy-hide-headers: "strict-transport-security"
  proxy-read-timeout: "60"
  proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14"
  proxy-send-timeout: "60"
  server-name-hash-bucket-size: "64"
  server-name-hash-max-size: "512"
  server-tokens: "false"
  ssl-protocols: "TLSv1.3"
  upstream-keepalive-connections: "32"
  use-forwarded-headers: "true"
  use-geoip: "false"
  use-geoip2: "false"
  use-gzip: "true"
  worker-cpu-affinity: "auto"
  worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    resourceNames:
      - ingress-nginx-leader
    verbs:
      - get
      - update
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: ClusterIP
  clusterIP: None
  ipFamilyPolicy: RequireDualStack
  ipFamilies:
    - IPv4
    - IPv6
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
      appProtocol: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  revisionHistoryLimit: 2
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  minReadySeconds: 15
  template:
    metadata:
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
      labels:
        helm.sh/chart: ingress-nginx-4.6.0
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: "1.7.0"
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: controller
          image: "registry.k8s.io/ingress-nginx/controller:v1.7.0@sha256:7612338342a1e7b8090bef78f2a04fffcadd548ccaabe8a47bf7758ff549a5f7"
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --election-id=ingress-nginx-leader
            - --controller-class=k8s.io/ingress-nginx
            - --ingress-class=nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 30
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 30
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
          resources:
            limits:
              cpu: 1
              memory: 1Gi
            requests:
              cpu: 100m
              memory: 128Mi
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: project.io/node-pool
                    operator: In
                    values:
                      - web
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.6.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.7.0"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
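With hostNetwork enabled, the controller's probe endpoint is reachable directly on each node in the web pool; a smoke test run on the node itself (10254 is the metrics/health port from the probes above):

curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:10254/healthz
# 200
kubectl -n ingress-nginx get pods -o wide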
116 proxmox/deployments/ingress.yaml Normal file
@@ -0,0 +1,116 @@

controller:
  kind: DaemonSet

  hostNetwork: true
  hostPort:
    enabled: false
    ports:
      http: 80
      https: 443

  dnsPolicy: ClusterFirstWithHostNet

  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate

  publishService:
    enabled: false

  config:
    worker-processes: "auto"
    worker-cpu-affinity: "auto"
    error-log-level: "error"

    server-tokens: "false"
    http-redirect-code: "301"

    use-gzip: "true"
    use-geoip: "false"
    use-geoip2: "false"

    use-forwarded-headers: "true"
    # curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
    proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14"

    enable-access-log-for-default-backend: "true"
    log-format-escape-json: "true"
    log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'

    upstream-keepalive-connections: "32"
    proxy-connect-timeout: "10"
    proxy-read-timeout: "60"
    proxy-send-timeout: "60"

    ssl-protocols: "TLSv1.3"
    hsts: "true"
    hsts-max-age: "31536000"
    hsts-include-subdomains: "true"
    hsts-preload: "true"
    proxy-hide-headers: "strict-transport-security"
    proxy-headers-hash-bucket-size: "128"

    server-name-hash-bucket-size: "64"
    server-name-hash-max-size: "512"

    limit-req-status-code: "429"

    client-header-timeout: "30"
    client-body-timeout: "30"

  minReadySeconds: 15

  podAnnotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "10254"

  extraEnvs:
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name

  livenessProbe:
    initialDelaySeconds: 15
    periodSeconds: 30
  readinessProbe:
    periodSeconds: 30

  resources:
    limits:
      cpu: 1
      memory: 1Gi
    requests:
      cpu: 100m
      memory: 128Mi

  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: project.io/node-pool
                operator: In
                values:
                  - web

  service:
    enabled: true
    type: ClusterIP
    clusterIP: None
    ipFamilyPolicy: "RequireDualStack"
    ipFamilies:
      - IPv4
      - IPv6

  admissionWebhooks:
    enabled: false
  metrics:
    enabled: false

revisionHistoryLimit: 2

defaultBackend:
  enabled: false
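The proxy-real-ip-cidr value is Cloudflare's published IPv4 list, generated with the command in the inline comment above. A sketch for refreshing it in place (assumes GNU sed; the file path matches this commit):

cidrs=$(curl -s https://www.cloudflare.com/ips-v4 | tr '\n' ',' | sed 's/,$//')
sed -i "s|proxy-real-ip-cidr:.*|proxy-real-ip-cidr: \"$cidrs\"|" deployments/ingress.yaml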
@@ -1,78 +1,131 @@
|
||||
|
||||
# resource "null_resource" "controlplane_machineconfig" {
|
||||
# count = lookup(var.controlplane, "count", 0)
|
||||
# connection {
|
||||
# type = "ssh"
|
||||
# user = "root"
|
||||
# host = var.proxmox_host
|
||||
# }
|
||||
locals {
|
||||
controlplane_prefix = "controlplane"
|
||||
controlplanes = { for k in flatten([
|
||||
for zone in local.zones : [
|
||||
for inx in range(lookup(try(var.controlplane[zone], {}), "count", 0)) : {
|
||||
id : lookup(try(var.controlplane[zone], {}), "id", 9000) + inx
|
||||
name : "${local.controlplane_prefix}-${lower(substr(zone, -1, -1))}${1 + inx}"
|
||||
zone : zone
|
||||
node_name : zone
|
||||
cpu : lookup(try(var.controlplane[zone], {}), "cpu", 1)
|
||||
mem : lookup(try(var.controlplane[zone], {}), "mem", 2048)
|
||||
ipv4 : "${cidrhost(local.controlplane_subnet, index(local.zones, zone) + inx)}/24"
|
||||
gwv4 : local.gwv4
|
||||
}
|
||||
]
|
||||
]) : k.name => k }
|
||||
}
|
||||
|
||||
# provisioner "file" {
|
||||
# content = templatefile("${path.module}/templates/controlplane.yaml",
|
||||
# merge(var.kubernetes, {
|
||||
# name = "controlplane-${count.index + 1}"
|
||||
# type = "controlplane"
|
||||
# ipv4_local = "192.168.10.11"
|
||||
# ipv4_vip = "192.168.10.10"
|
||||
# nodeSubnets = "${var.vpc_main_cidr}"
|
||||
# })
|
||||
# )
|
||||
resource "null_resource" "controlplane_metadata" {
|
||||
for_each = local.controlplanes
|
||||
connection {
|
||||
type = "ssh"
|
||||
user = "root"
|
||||
host = "${each.value.node_name}.${var.proxmox_domain}"
|
||||
}
|
||||
|
||||
# destination = "/var/lib/vz/snippets/controlplane-${count.index + 1}.yml"
|
||||
# }
|
||||
# }
|
||||
provisioner "file" {
|
||||
content = templatefile("${path.module}/templates/metadata.yaml", {
|
||||
hostname : each.value.name,
|
||||
id : each.value.id,
|
||||
type : "qemu",
|
||||
zone : each.value.zone,
|
||||
region : var.region,
|
||||
})
|
||||
destination = "/var/lib/vz/snippets/${each.value.name}.metadata.yaml"
|
||||
}
|
||||
|
||||
# resource "proxmox_vm_qemu" "controlplane" {
|
||||
# count = lookup(var.controlplane, "count", 0)
|
||||
# name = "controlplane-${count.index + 1}"
|
||||
# target_node = var.proxmox_nodename
|
||||
# clone = var.proxmox_image
|
||||
triggers = {
|
||||
params = join(",", [for k, v in local.controlplanes[each.key] : "${k}-${v}"])
|
||||
}
|
||||
}
|
||||
|
||||
# # preprovision = false
|
||||
# define_connection_info = false
|
||||
# os_type = "ubuntu"
|
||||
# ipconfig0 = "ip=${cidrhost(var.vpc_main_cidr, 11 + count.index)}/24,gw=${local.gwv4}"
|
||||
# cicustom = "user=local:snippets/controlplane-${count.index + 1}.yml"
|
||||
# cloudinit_cdrom_storage = var.proxmox_storage
|
||||
resource "proxmox_vm_qemu" "controlplane" {
|
||||
for_each = local.controlplanes
|
||||
name = each.value.name
|
||||
vmid = each.value.id
|
||||
target_node = each.value.node_name
|
||||
clone = var.proxmox_image
|
||||
|
||||
# onboot = false
|
||||
# cpu = "host,flags=+aes"
|
||||
# cores = 2
|
||||
# sockets = 1
|
||||
# memory = 2048
|
||||
# scsihw = "virtio-scsi-pci"
|
||||
agent = 0
|
||||
define_connection_info = false
|
||||
os_type = "ubuntu"
|
||||
qemu_os = "l26"
|
||||
ipconfig0 = "ip6=auto"
|
||||
ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
|
||||
cicustom = "meta=local:snippets/${each.value.name}.metadata.yaml"
|
||||
cloudinit_cdrom_storage = var.proxmox_storage
|
||||
|
||||
# vga {
|
||||
# memory = 0
|
||||
# type = "serial0"
|
||||
# }
|
||||
# serial {
|
||||
# id = 0
|
||||
# type = "socket"
|
||||
# }
|
||||
onboot = false
|
||||
cpu = "host,flags=+aes"
|
||||
sockets = 1
|
||||
cores = each.value.cpu
|
||||
memory = each.value.mem
|
||||
scsihw = "virtio-scsi-pci"
|
||||
|
||||
# network {
|
||||
# model = "virtio"
|
||||
# bridge = var.proxmox_bridge
|
||||
# firewall = false
|
||||
# }
|
||||
vga {
|
||||
memory = 0
|
||||
type = "serial0"
|
||||
}
|
||||
serial {
|
||||
id = 0
|
||||
type = "socket"
|
||||
}
|
||||
|
||||
# boot = "order=scsi0"
|
||||
# disk {
|
||||
# type = "scsi"
|
||||
# storage = var.proxmox_storage
|
||||
# size = "16G"
|
||||
# cache = "writethrough"
|
||||
# ssd = 1
|
||||
# backup = 0
|
||||
# }
|
||||
network {
|
||||
model = "virtio"
|
||||
bridge = "vmbr0"
|
||||
firewall = true
|
||||
}
|
||||
network {
|
||||
model = "virtio"
|
||||
bridge = "vmbr1"
|
||||
}
|
||||
|
||||
# lifecycle {
|
||||
# ignore_changes = [
|
||||
# desc,
|
||||
# define_connection_info,
|
||||
# ]
|
||||
# }
|
||||
boot = "order=scsi0"
|
||||
disk {
|
||||
type = "scsi"
|
||||
storage = var.proxmox_storage
|
||||
size = "32G"
|
||||
cache = "writethrough"
|
||||
ssd = 1
|
||||
backup = false
|
||||
}
|
||||
|
||||
# depends_on = [null_resource.controlplane_machineconfig]
|
||||
# }
|
||||
lifecycle {
|
||||
ignore_changes = [
|
||||
boot,
|
||||
network,
|
||||
desc,
|
||||
numa,
|
||||
agent,
|
||||
ipconfig0,
|
||||
ipconfig1,
|
||||
define_connection_info,
|
||||
]
|
||||
}
|
||||
|
||||
depends_on = [null_resource.controlplane_metadata]
|
||||
}
|
||||
|
||||
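For reference, the two `ipconfig` attributes above are cloud-init network settings: interface 0 (vmbr0) relies on IPv6 SLAAC only, while interface 1 (vmbr1) gets the node's static private address. A sketch of the rendered values, using illustrative per-node data:

# Assuming each.value.ipv4 = "172.16.0.48/17" and each.value.gwv4 = "172.16.0.1"
# (hypothetical values, not taken from the module), Proxmox would receive:
#   ipconfig0 = "ip6=auto"
#   ipconfig1 = "ip=172.16.0.48/17,gw=172.16.0.1"
# The lifecycle block then ignores later drift on both ipconfig fields, so a
# manual change in the Proxmox UI does not force a VM rebuild.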
resource "local_file" "controlplane" {
|
||||
for_each = local.controlplanes
|
||||
content = templatefile("${path.module}/templates/controlplane.yaml.tpl",
|
||||
merge(var.kubernetes, {
|
||||
name = each.value.name
|
||||
ipv4_vip = local.ipv4_vip
|
||||
nodeSubnets = local.controlplane_subnet
|
||||
})
|
||||
)
|
||||
filename = "_cfgs/${each.value.name}.yaml"
|
||||
file_permission = "0600"
|
||||
}
|
||||
|
||||
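`merge()` gives the per-node values precedence: keys in the inline map override any identically named defaults coming from `var.kubernetes` before the template is rendered. Roughly, with illustrative values:

# merge({ nodeSubnets = "192.168.0.0/16", domain = "cluster.local" },
#       { nodeSubnets = "172.16.0.0/17" })
#   => { nodeSubnets = "172.16.0.0/17", domain = "cluster.local" }
# Later arguments win, so each node's subnet and name shadow the shared defaults.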
resource "null_resource" "controlplane" {
|
||||
for_each = local.controlplanes
|
||||
provisioner "local-exec" {
|
||||
command = "sleep 60 && talosctl apply-config --insecure --nodes ${each.value.ipv4} --config-patch @_cfgs/${each.value.name}.yaml --file _cfgs/controlplane.yaml"
|
||||
}
|
||||
depends_on = [proxmox_vm_qemu.controlplane, local_file.controlplane]
|
||||
}
|
||||
|
||||
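Because the `local-exec` provisioner runs exactly once per node, re-applying the machine config to a single node means tainting that instance first. A sketch, with a hypothetical node key:

terraform taint 'null_resource.controlplane["controlplane-1"]'
terraform apply -target='null_resource.controlplane["controlplane-1"]'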
@@ -1,5 +1,5 @@

locals {
  gwv4       = cidrhost(var.vpc_main_cidr, 1)
  lbv4_local = cidrhost(var.vpc_main_cidr, 10)
  gwv4     = cidrhost(var.vpc_main_cidr, 1)
  ipv4_vip = cidrhost(var.vpc_main_cidr, 10)
}

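This hunk only renames `lbv4_local` to `ipv4_vip`; the addresses are unchanged. `cidrhost` picks a single host number inside the VPC CIDR; assuming `vpc_main_cidr = "172.16.0.0/12"` (an illustrative value), the locals evaluate to:

# gwv4     = cidrhost("172.16.0.0/12", 1)   # => "172.16.0.1"  (default gateway)
# ipv4_vip = cidrhost("172.16.0.0/12", 10)  # => "172.16.0.10" (shared control-plane VIP)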
@@ -1,5 +1,7 @@

locals {
  zones   = [for k, v in var.instances : k]
  subnets = { for inx, zone in local.zones : zone => cidrsubnet(var.vpc_main_cidr, 5, var.network_shift + inx) }
  zones = [for k, v in var.instances : k]

  controlplane_subnet = cidrsubnet(var.vpc_main_cidr, 5, var.network_shift)
  subnets             = { for inx, zone in local.zones : zone => cidrsubnet(var.vpc_main_cidr, 5, var.network_shift + inx + 1) }
}

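The added `+ 1` shifts the per-zone worker subnets one slot up, reserving the first carved-out subnet for the control plane. With the same illustrative `vpc_main_cidr = "172.16.0.0/12"` and `network_shift = 0`, `cidrsubnet(..., 5, n)` carves /17 blocks:

# controlplane_subnet = cidrsubnet("172.16.0.0/12", 5, 0)  # => "172.16.0.0/17"
# subnets["zone-a"]   = cidrsubnet("172.16.0.0/12", 5, 1)  # => "172.16.128.0/17"
# subnets["zone-b"]   = cidrsubnet("172.16.0.0/12", 5, 2)  # => "172.17.0.0/17"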
@@ -1,29 +1,25 @@
version: v1alpha1
debug: false
persist: true
machine:
  type: ${type}
  certSANs:
    - "${ipv4_local}"
    - "${ipv4_vip}"
  kubelet:
    extraArgs:
      rotate-server-certificates: true
    clusterDNS:
      - 169.254.2.53
      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
    nodeIP:
      validSubnets: ${format("%#v",split(",",nodeSubnets))}
  network:
    hostname: "${name}"
    interfaces:
      - interface: eth0
        dhcp: true
      - interface: eth1
        vip:
          ip: ${ipv4_vip}
      - interface: dummy0
        addresses:
          - 169.254.2.53/32
          - fd00::169:254:2:53/128
  install:
    wipe: false
  extraHostEntries:
    - ip: 127.0.0.1
      aliases:
        - ${apiDomain}
  sysctls:
    net.core.somaxconn: 65535
    net.core.netdev_max_backlog: 4096
@@ -46,20 +42,31 @@ machine:
          slot: 0
cluster:
  controlPlane:
    endpoint: https://${ipv4_vip}:6443
    endpoint: https://${apiDomain}:6443
  network:
    dnsDomain: ${domain}
    podSubnets: ${format("%#v",split(",",podSubnets))}
    serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
  # proxy:
  #   disabled: true
  apiServer:
    certSANs:
      - "${ipv4_local}"
      - "${ipv4_vip}"
  cni:
    name: custom
    urls:
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/cilium-result.yaml
  proxy:
    disabled: true
  controllerManager:
    extraArgs:
      node-cidr-mask-size-ipv4: 24
      node-cidr-mask-size-ipv6: 112
  scheduler: {}
  etcd: {}
  etcd:
    advertisedSubnets:
      - ${nodeSubnets}
    listenSubnets:
      - ${nodeSubnets}
  externalCloudProvider:
    enabled: true
    manifests:
      - https://raw.githubusercontent.com/siderolabs/talos-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/metrics-server.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/coredns-local.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/ingress-ns.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/hetzner/deployments/ingress-result.yaml

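Two template idioms above are worth unpacking. Taking the default `serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"` from the variables hunk further below:

# cidrhost(split(",", serviceSubnets)[0], 10)
#   => cidrhost("10.200.0.0/22", 10)
#   => "10.200.0.10"                           # the in-cluster DNS service IP
#
# format("%#v", split(",", serviceSubnets))    # %#v serializes the value as JSON
#   => ["10.200.0.0/22","fd40:10:200::/112"]   # valid as a YAML flow sequence
#
# so the rendered machine-config line reads:
#   serviceSubnets: ["10.200.0.0/22","fd40:10:200::/112"]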
@@ -1,23 +0,0 @@
machine:
  kubelet:
    extraArgs:
      cloud-provider: external
      rotate-server-certificates: true
    clusterDNS:
      - 169.254.2.53
      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
  network:
    interfaces:
      - interface: dummy0
        addresses:
          - 169.254.2.53/32
    extraHostEntries:
      - ip: ${lbv4}
        aliases:
          - ${apiDomain}
  sysctls:
    net.core.somaxconn: 65535
    net.core.netdev_max_backlog: 4096
cluster:
  proxy:
    disabled: true
@@ -1,41 +1,44 @@
version: v1alpha1
debug: false
persist: true
machine:
  type: worker
  token: ${tokenMachine}
  ca:
    crt: ${caMachine}
  kubelet:
    extraArgs:
      cloud-provider: external
      rotate-server-certificates: true
      node-labels: "${labels}"
    nodeIP:
      validSubnets: ${format("%#v",split(",",nodeSubnets))}
    clusterDNS:
      - 169.254.2.53
      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
    nodeIP:
      validSubnets: ${format("%#v",split(",",nodeSubnets))}
  network:
    hostname: "${name}"
    interfaces:
      - interface: dummy0
        addresses:
          - 169.254.2.53/32
          - fd00::169:254:2:53/128
    extraHostEntries:
      - ip: ${lbv4}
        aliases:
          - ${apiDomain}
  sysctls:
    net.core.somaxconn: 65535
    net.core.netdev_max_backlog: 4096
  install:
    wipe: false
  systemDiskEncryption:
    state:
      provider: luks2
      options:
        - no_read_workqueue
        - no_write_workqueue
      keys:
        - nodeID: {}
          slot: 0
    ephemeral:
      provider: luks2
      options:
        - no_read_workqueue
        - no_write_workqueue
      keys:
        - nodeID: {}
          slot: 0
cluster:
  controlPlane:
    endpoint: https://${lbv4}:6443
  clusterName: ${clusterName}
  network:
    dnsDomain: ${domain}
    serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
  # proxy:
  #   disabled: true
  token: ${token}
  ca:
    crt: ${ca}
    endpoint: https://${apiDomain}:6443
  proxy:
    disabled: true

@@ -48,6 +48,7 @@ variable "kubernetes" {
  default = {
    podSubnets     = "10.32.0.0/12,fd40:10:32::/102"
    serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
    nodeSubnets    = "192.168.0.0/16"
    domain         = "cluster.local"
    apiDomain      = "api.cluster.local"
    clusterName    = "talos-k8s-proxmox"
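Since setting a variable replaces its default map wholesale rather than merging with it, overriding one key means restating the whole object. A sketch of a `terraform.tfvars` entry, with illustrative values:

kubernetes = {
  podSubnets     = "10.32.0.0/12,fd40:10:32::/102"
  serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
  nodeSubnets    = "172.16.0.0/12"
  domain         = "cluster.local"
  apiDomain      = "api.cluster.local"
  clusterName    = "talos-k8s-proxmox"
}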