Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-10-31 02:08:32 +00:00)

Commit: controlplane refactoring
		| @@ -1,7 +1,11 @@ | ||||
|  | ||||
| CLUSTERNAME := "talos-k8s-azure" | ||||
| CPFIRST := ${shell terraform output -raw controlplane_endpoint 2>/dev/null} | ||||
| ENDPOINT ?= $(shell terraform output -no-color -raw controlplane_endpoint_public 2>/dev/null) | ||||
| ifneq (,$(findstring Warning,${ENDPOINT})) | ||||
| ENDPOINT := 127.0.0.1 | ||||
| ENDPOINT := api.cluster.local | ||||
| else ifeq (,$(ENDPOINT)) | ||||
| ENDPOINT := api.cluster.local | ||||
| endif | ||||
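
The new ENDPOINT logic falls back to api.cluster.local both when terraform prints a
warning instead of the output and when the output is empty. A minimal sketch of the
same fallback in plain shell (assumes an initialized terraform working directory):

    # resolve the public controlplane endpoint, defaulting when unavailable
    ENDPOINT="$(terraform output -no-color -raw controlplane_endpoint_public 2>/dev/null)"
    case "$ENDPOINT" in
      *Warning*|"") ENDPOINT="api.cluster.local" ;;  # warning text or no output yet
    esac
    echo "using endpoint: $ENDPOINT"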
|  | ||||
| help: | ||||
| @@ -12,14 +16,12 @@ clean: | ||||
| 	rm -f kubeconfig talosctl | ||||
|  | ||||
| create-config: ## Generate talos configs | ||||
| 	talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-azure https://${ENDPOINT}:6443 | ||||
| 	talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false ${CLUSTERNAME} https://${ENDPOINT}:6443 | ||||
| 	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT} | ||||
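
Once generated, the machine config can be sanity-checked before any node boots; a
quick check, assuming talosctl is on PATH and the nodes run on a cloud platform:

    # validate the generated controlplane config for a cloud deployment
    talosctl validate --config _cfgs/controlplane.yaml --mode cloud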
|  | ||||
| create-templates: | ||||
| 	@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml | ||||
| 	@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"'        >  _cfgs/tfstate.vars | ||||
| 	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"'  >> _cfgs/tfstate.vars | ||||
| 	@echo 'nodeSubnets: "172.16.0.0/12"'                       >> _cfgs/tfstate.vars | ||||
| 	@echo 'apiDomain: api.cluster.local'                       >> _cfgs/tfstate.vars | ||||
| 	@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}'       >> _cfgs/tfstate.vars | ||||
| 	@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml       | awk '{ print "clusterName: "$$1}'  >> _cfgs/tfstate.vars | ||||
| @@ -33,23 +35,21 @@ create-templates: | ||||
| 	@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json | ||||
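
The yq expression above is the standard yq v4 deep-merge: eval-all (ea) reads both
files, then ireduce folds them into one document, with later files winning on
conflicting keys. A toy run with illustrative file names shows the behaviour:

    # demonstrate the deep-merge used for templates/controlplane.yaml
    printf 'a: 1\nnested: {x: 1}\n' > base.yaml
    printf 'nested: {y: 2}\n'       > overlay.yaml
    yq ea -P '. as $item ireduce ({}; . * $item)' base.yaml overlay.yaml
    # result keeps a: 1 and nested now carries both x: 1 and y: 2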
|  | ||||
| create-deployments: | ||||
| 	helm template --namespace=kube-system   --version=1.12.7 -f deployments/cilium.yaml cilium \ | ||||
| 		cilium/cilium > deployments/cilium-result.yaml | ||||
| 	helm template --namespace=kube-system -f deployments/azure-autoscaler.yaml cluster-autoscaler-azure \ | ||||
| 		autoscaler/cluster-autoscaler > deployments/azure-autoscaler-result.yaml | ||||
| 	helm template --namespace=ingress-nginx --version=4.4.2 -f deployments/ingress.yaml ingress-nginx \ | ||||
| 		ingress-nginx/ingress-nginx > deployments/ingress-result.yaml | ||||
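
These helm template calls assume the cilium, autoscaler and ingress-nginx chart
repositories are already configured; a one-time setup with the upstream repo URLs
might look like:

    # one-time chart repo setup assumed by the helm template calls above
    helm repo add cilium https://helm.cilium.io/
    helm repo add autoscaler https://kubernetes.github.io/autoscaler
    helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
    helm repo update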
|  | ||||
| create-network: ## Create networks | ||||
| 	cd prepare && terraform init && terraform apply -auto-approve | ||||
|  | ||||
| create-controlplane-bootstrap: | ||||
| 	talosctl --talosconfig _cfgs/talosconfig config endpoint ${CPFIRST} | ||||
| 	talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} bootstrap | ||||
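
After the bootstrap call returns, etcd still needs a moment to form a cluster. An
optional follow-up check against the same first node (CPFIRST as defined at the top
of the Makefile):

    # watch etcd come up, then wait for overall cluster health
    CPFIRST="$(terraform output -raw controlplane_endpoint 2>/dev/null)"
    talosctl --talosconfig _cfgs/talosconfig --nodes "$CPFIRST" service etcd
    talosctl --talosconfig _cfgs/talosconfig --nodes "$CPFIRST" health --wait-timeout 5m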
|  | ||||
| create-controlplane: ## Bootstrap controlplane | ||||
| 	terraform apply -target=module.controlplane | ||||
| 	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT} | ||||
| 	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.136.11 bootstrap | ||||
| 	terraform apply -target=azurerm_linux_virtual_machine.controlplane | ||||
|  | ||||
| create-kubeconfig: ## Download kubeconfig | ||||
| 	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.136.11 kubeconfig . | ||||
| 	talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} kubeconfig . | ||||
| 	kubectl --kubeconfig=kubeconfig config set clusters.talos-k8s-azure.server https://${ENDPOINT}:6443 | ||||
| 	kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system | ||||
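
A quick way to confirm the rewritten kubeconfig resolves and authenticates through
the public endpoint:

    # should list the controlplane nodes via https://${ENDPOINT}:6443
    kubectl --kubeconfig=kubeconfig get nodes -o wide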
|  | ||||
|   | ||||
| @@ -1,32 +1,14 @@ | ||||
|  | ||||
| # data "azurerm_image" "talos" { | ||||
| #   for_each            = { for idx, name in local.regions : name => idx } | ||||
| #   name                = "talos-amd64-${each.key}" | ||||
| #   resource_group_name = local.resource_group | ||||
| # } | ||||
|  | ||||
| # data "azurerm_shared_image" "talos" { | ||||
| #   name                = "talos-arm64" | ||||
| #   gallery_name        = var.gallery_name | ||||
| #   resource_group_name = local.resource_group | ||||
| # } | ||||
| data "azurerm_client_config" "terraform" {} | ||||
|  | ||||
| data "azurerm_shared_image_version" "talos" { | ||||
|   for_each            = toset(var.arch) | ||||
|   name                = "latest" | ||||
|   image_name          = "talos-x64" | ||||
|   image_name          = "talos-${lower(each.key)}" | ||||
|   gallery_name        = var.gallery_name | ||||
|   resource_group_name = local.resource_group | ||||
| } | ||||
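
With, say, var.arch = ["amd64", "arm64"], the for_each creates one lookup per
architecture against the image definitions talos-amd64 and talos-arm64, always at
their "latest" version. The name interpolation itself is easy to sanity-check in
terraform console (input values below are illustrative):

    # prints talos-amd64 and talos-arm64 regardless of input casing
    echo '[for a in toset(["AMD64", "ARM64"]) : "talos-${lower(a)}"]' | terraform console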
|  | ||||
| # data "azurerm_shared_image_version" "talos_arm" { | ||||
| #   name                = "latest" | ||||
| #   image_name          = "talos-arm64" | ||||
| #   gallery_name        = var.gallery_name | ||||
| #   resource_group_name = local.resource_group | ||||
| # } | ||||
|  | ||||
| data "azurerm_client_config" "terraform" {} | ||||
|  | ||||
| resource "azurerm_proximity_placement_group" "common" { | ||||
|   for_each            = { for idx, name in local.regions : name => idx } | ||||
|   location            = each.key | ||||
|   | ||||
| @@ -1,891 +0,0 @@ | ||||
| --- | ||||
| # Source: cilium/templates/cilium-agent/serviceaccount.yaml | ||||
| apiVersion: v1 | ||||
| kind: ServiceAccount | ||||
| metadata: | ||||
|   name: "cilium" | ||||
|   namespace: kube-system | ||||
| --- | ||||
| # Source: cilium/templates/cilium-operator/serviceaccount.yaml | ||||
| apiVersion: v1 | ||||
| kind: ServiceAccount | ||||
| metadata: | ||||
|   name: "cilium-operator" | ||||
|   namespace: kube-system | ||||
| --- | ||||
| # Source: cilium/templates/cilium-configmap.yaml | ||||
| apiVersion: v1 | ||||
| kind: ConfigMap | ||||
| metadata: | ||||
|   name: cilium-config | ||||
|   namespace: kube-system | ||||
| data: | ||||
|  | ||||
|   # Identity allocation mode selects how identities are shared between cilium | ||||
|   # nodes by setting how they are stored. The options are "crd" or "kvstore". | ||||
|   # - "crd" stores identities in kubernetes as CRDs (custom resource definition). | ||||
|   #   These can be queried with: | ||||
|   #     kubectl get ciliumid | ||||
|   # - "kvstore" stores identities in an etcd kvstore, that is | ||||
|   #   configured below. Cilium versions before 1.6 supported only the kvstore | ||||
|   #   backend. Upgrades from these older cilium versions should continue using | ||||
|   #   the kvstore by commenting out the identity-allocation-mode below, or | ||||
|   #   setting it to "kvstore". | ||||
|   identity-allocation-mode: crd | ||||
|   cilium-endpoint-gc-interval: "5m0s" | ||||
|   nodes-gc-interval: "5m0s" | ||||
|   skip-cnp-status-startup-clean: "false" | ||||
|   # Disable the usage of CiliumEndpoint CRD | ||||
|   disable-endpoint-crd: "false" | ||||
|  | ||||
|   # If you want to run cilium in debug mode change this value to true | ||||
|   debug: "false" | ||||
|   # The agent can be put into the following three policy enforcement modes: | ||||
|   # default, always and never. | ||||
|   # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes | ||||
|   enable-policy: "default" | ||||
|   # If you want metrics enabled in all of your Cilium agents, set the port for | ||||
|   # which the Cilium agents will have their metrics exposed. | ||||
|   # This option deprecates the "prometheus-serve-addr" in the | ||||
|   # "cilium-metrics-config" ConfigMap | ||||
|   # NOTE that this will open the port on ALL nodes where Cilium pods are | ||||
|   # scheduled. | ||||
|   prometheus-serve-addr: ":9962" | ||||
|   # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this | ||||
|   # field is not set. | ||||
|   proxy-prometheus-port: "9964" | ||||
|  | ||||
|   # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 | ||||
|   # address. | ||||
|   enable-ipv4: "true" | ||||
|  | ||||
|   # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 | ||||
|   # address. | ||||
|   enable-ipv6: "true" | ||||
|   # Users who wish to specify their own custom CNI configuration file must set | ||||
|   # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. | ||||
|   custom-cni-conf: "false" | ||||
|   enable-bpf-clock-probe: "true" | ||||
|   # If you want cilium monitor to aggregate tracing for packets, set this level | ||||
|   # to "low", "medium", or "maximum". The higher the level, the less packets | ||||
|   # that will be seen in monitor output. | ||||
|   monitor-aggregation: medium | ||||
|  | ||||
|   # The monitor aggregation interval governs the typical time between monitor | ||||
|   # notification events for each allowed connection. | ||||
|   # | ||||
|   # Only effective when monitor aggregation is set to "medium" or higher. | ||||
|   monitor-aggregation-interval: 5s | ||||
|  | ||||
|   # The monitor aggregation flags determine which TCP flags, upon their first | ||||
|   # observation, cause monitor notifications to be generated. | ||||
|   # | ||||
|   # Only effective when monitor aggregation is set to "medium" or higher. | ||||
|   monitor-aggregation-flags: all | ||||
|   # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic | ||||
|   # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. | ||||
|   bpf-map-dynamic-size-ratio: "0.0025" | ||||
|   # bpf-policy-map-max specifies the maximum number of entries in endpoint | ||||
|   # policy map (per endpoint) | ||||
|   bpf-policy-map-max: "16384" | ||||
|   # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, | ||||
|   # backend and affinity maps. | ||||
|   bpf-lb-map-max: "65536" | ||||
|   # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass | ||||
|   # optimization for nodeport reverse NAT handling. | ||||
|   bpf-lb-external-clusterip: "false" | ||||
|  | ||||
|   # Pre-allocation of map entries allows per-packet latency to be reduced, at | ||||
|   # the expense of up-front memory allocation for the entries in the maps. The | ||||
|   # default value below will minimize memory usage in the default installation; | ||||
|   # users who are sensitive to latency may consider setting this to "true". | ||||
|   # | ||||
|   # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore | ||||
|   # this option and behave as though it is set to "true". | ||||
|   # | ||||
|   # If this value is modified, then during the next Cilium startup the restore | ||||
|   # of existing endpoints and tracking of ongoing connections may be disrupted. | ||||
|   # As a result, reply packets may be dropped and the load-balancing decisions | ||||
|   # for established connections may change. | ||||
|   # | ||||
|   # If this option is set to "false" during an upgrade from 1.3 or earlier to | ||||
|   # 1.4 or later, then it may cause one-time disruptions during the upgrade. | ||||
|   preallocate-bpf-maps: "false" | ||||
|  | ||||
|   # Regular expression matching compatible Istio sidecar istio-proxy | ||||
|   # container image names | ||||
|   sidecar-istio-proxy-image: "cilium/istio_proxy" | ||||
|  | ||||
|   # Name of the cluster. Only relevant when building a mesh of clusters. | ||||
|   cluster-name: default | ||||
|   # Unique ID of the cluster. Must be unique across all connected clusters and | ||||
|   # in the range of 1 to 255. Only relevant when building a mesh of clusters. | ||||
|   cluster-id: "0" | ||||
|  | ||||
|   # Encapsulation mode for communication between nodes | ||||
|   # Possible values: | ||||
|   #   - disabled | ||||
|   #   - vxlan (default) | ||||
|   #   - geneve | ||||
|   tunnel: "vxlan" | ||||
|   # Enables L7 proxy for L7 policy enforcement and visibility | ||||
|   enable-l7-proxy: "true" | ||||
|  | ||||
|   enable-ipv4-masquerade: "true" | ||||
|   enable-ipv6-masquerade: "true" | ||||
|   enable-bpf-masquerade: "false" | ||||
|  | ||||
|   enable-xt-socket-fallback: "true" | ||||
|   install-iptables-rules: "true" | ||||
|   install-no-conntrack-iptables-rules: "false" | ||||
|  | ||||
|   auto-direct-node-routes: "false" | ||||
|   enable-local-redirect-policy: "true" | ||||
|   enable-host-firewall: "true" | ||||
|   # List of devices used to attach bpf_host.o (implements BPF NodePort, | ||||
|   # host-firewall and BPF masquerading) | ||||
|   devices: "eth+ wg+" | ||||
|  | ||||
|   kube-proxy-replacement: "strict" | ||||
|   kube-proxy-replacement-healthz-bind-address: "" | ||||
|   bpf-lb-sock: "false" | ||||
|   host-reachable-services-protos:  | ||||
|   enable-health-check-nodeport: "true" | ||||
|   node-port-bind-protection: "true" | ||||
|   enable-auto-protect-node-port-range: "true" | ||||
|   enable-svc-source-range-check: "true" | ||||
|   enable-l2-neigh-discovery: "true" | ||||
|   arping-refresh-period: "30s" | ||||
|   k8s-require-ipv4-pod-cidr: "true" | ||||
|   k8s-require-ipv6-pod-cidr: "true" | ||||
|   enable-endpoint-health-checking: "true" | ||||
|   enable-health-checking: "true" | ||||
|   enable-well-known-identities: "false" | ||||
|   enable-remote-node-identity: "true" | ||||
|   synchronize-k8s-nodes: "true" | ||||
|   operator-api-serve-addr: "127.0.0.1:9234" | ||||
|   ipam: "kubernetes" | ||||
|   disable-cnp-status-updates: "true" | ||||
|   enable-vtep: "false" | ||||
|   vtep-endpoint: "" | ||||
|   vtep-cidr: "" | ||||
|   vtep-mask: "" | ||||
|   vtep-mac: "" | ||||
|   enable-k8s-endpoint-slice: "true" | ||||
|   enable-bgp-control-plane: "false" | ||||
|   bpf-root: "/sys/fs/bpf" | ||||
|   cgroup-root: "/sys/fs/cgroup" | ||||
|   enable-k8s-terminating-endpoint: "true" | ||||
|   remove-cilium-node-taints: "true" | ||||
|   set-cilium-is-up-condition: "true" | ||||
|   unmanaged-pod-watcher-interval: "15" | ||||
|   tofqdns-dns-reject-response-code: "refused" | ||||
|   tofqdns-enable-dns-compression: "true" | ||||
|   tofqdns-endpoint-max-ip-per-hostname: "50" | ||||
|   tofqdns-idle-connection-grace-period: "0s" | ||||
|   tofqdns-max-deferred-connection-deletes: "10000" | ||||
|   tofqdns-min-ttl: "3600" | ||||
|   tofqdns-proxy-response-max-delay: "100ms" | ||||
|    | ||||
|   mtu: "1420" | ||||
|   agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" | ||||
| --- | ||||
| # Source: cilium/templates/cilium-agent/clusterrole.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   name: cilium | ||||
| rules: | ||||
| - apiGroups: | ||||
|   - networking.k8s.io | ||||
|   resources: | ||||
|   - networkpolicies | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - discovery.k8s.io | ||||
|   resources: | ||||
|   - endpointslices | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   - namespaces | ||||
|   - services | ||||
|   - pods | ||||
|   - endpoints | ||||
|   - nodes | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - apiextensions.k8s.io | ||||
|   resources: | ||||
|   - customresourcedefinitions | ||||
|   verbs: | ||||
|   - list | ||||
|   - watch | ||||
|   # This is used when validating policies in preflight. It will need to stay | ||||
|   # until we figure out how to avoid "get" inside the preflight, and should | ||||
|   # ideally be removed after that. | ||||
|   - get | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumbgploadbalancerippools | ||||
|   - ciliumbgppeeringpolicies | ||||
|   - ciliumclusterwideenvoyconfigs | ||||
|   - ciliumclusterwidenetworkpolicies | ||||
|   - ciliumegressgatewaypolicies | ||||
|   - ciliumegressnatpolicies | ||||
|   - ciliumendpoints | ||||
|   - ciliumendpointslices | ||||
|   - ciliumenvoyconfigs | ||||
|   - ciliumidentities | ||||
|   - ciliumlocalredirectpolicies | ||||
|   - ciliumnetworkpolicies | ||||
|   - ciliumnodes | ||||
|   verbs: | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumidentities | ||||
|   - ciliumendpoints | ||||
|   - ciliumnodes | ||||
|   verbs: | ||||
|   - create | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   # To synchronize garbage collection of such resources | ||||
|   resources: | ||||
|   - ciliumidentities | ||||
|   verbs: | ||||
|   - update | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumendpoints | ||||
|   verbs: | ||||
|   - delete | ||||
|   - get | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumnodes | ||||
|   - ciliumnodes/status | ||||
|   verbs: | ||||
|   - get | ||||
|   - update | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumnetworkpolicies/status | ||||
|   - ciliumclusterwidenetworkpolicies/status | ||||
|   - ciliumendpoints/status | ||||
|   - ciliumendpoints | ||||
|   verbs: | ||||
|   - patch | ||||
| --- | ||||
| # Source: cilium/templates/cilium-operator/clusterrole.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   name: cilium-operator | ||||
| rules: | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   - pods | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
|   # to automatically delete [core|kube]dns pods so that they start being | ||||
|   # managed by Cilium | ||||
|   - delete | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   - nodes | ||||
|   verbs: | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   # To remove node taints | ||||
|   - nodes | ||||
|   # To set NetworkUnavailable false on startup | ||||
|   - nodes/status | ||||
|   verbs: | ||||
|   - patch | ||||
| - apiGroups: | ||||
|   - discovery.k8s.io | ||||
|   resources: | ||||
|   - endpointslices | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   # to perform LB IP allocation for BGP | ||||
|   - services/status | ||||
|   verbs: | ||||
|   - update | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   # to check apiserver connectivity | ||||
|   - namespaces | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   # to perform the translation of a CNP that contains `ToGroup` to its endpoints | ||||
|   - services | ||||
|   - endpoints | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumnetworkpolicies | ||||
|   - ciliumclusterwidenetworkpolicies | ||||
|   verbs: | ||||
|   # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups' | ||||
|   - create | ||||
|   - update | ||||
|   - deletecollection | ||||
|   # To update the status of the CNPs and CCNPs | ||||
|   - patch | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumnetworkpolicies/status | ||||
|   - ciliumclusterwidenetworkpolicies/status | ||||
|   verbs: | ||||
|   # Update the auto-generated CNPs and CCNPs status. | ||||
|   - patch | ||||
|   - update | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumendpoints | ||||
|   - ciliumidentities | ||||
|   verbs: | ||||
|   # To perform garbage collection of such resources | ||||
|   - delete | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumidentities | ||||
|   verbs: | ||||
|   # To synchronize garbage collection of such resources | ||||
|   - update | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumnodes | ||||
|   verbs: | ||||
|   - create | ||||
|   - update | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
|   # To perform CiliumNode garbage collection | ||||
|   - delete | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumnodes/status | ||||
|   verbs: | ||||
|   - update | ||||
| - apiGroups: | ||||
|   - cilium.io | ||||
|   resources: | ||||
|   - ciliumendpointslices | ||||
|   - ciliumenvoyconfigs | ||||
|   verbs: | ||||
|   - create | ||||
|   - update | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
|   - delete | ||||
| - apiGroups: | ||||
|   - apiextensions.k8s.io | ||||
|   resources: | ||||
|   - customresourcedefinitions | ||||
|   verbs: | ||||
|   - create | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - apiextensions.k8s.io | ||||
|   resources: | ||||
|   - customresourcedefinitions | ||||
|   verbs: | ||||
|   - update | ||||
|   resourceNames: | ||||
|   - ciliumbgploadbalancerippools.cilium.io | ||||
|   - ciliumbgppeeringpolicies.cilium.io | ||||
|   - ciliumclusterwideenvoyconfigs.cilium.io | ||||
|   - ciliumclusterwidenetworkpolicies.cilium.io | ||||
|   - ciliumegressgatewaypolicies.cilium.io | ||||
|   - ciliumegressnatpolicies.cilium.io | ||||
|   - ciliumendpoints.cilium.io | ||||
|   - ciliumendpointslices.cilium.io | ||||
|   - ciliumenvoyconfigs.cilium.io | ||||
|   - ciliumexternalworkloads.cilium.io | ||||
|   - ciliumidentities.cilium.io | ||||
|   - ciliumlocalredirectpolicies.cilium.io | ||||
|   - ciliumnetworkpolicies.cilium.io | ||||
|   - ciliumnodes.cilium.io | ||||
| # For cilium-operator running in HA mode. | ||||
| # | ||||
| # Cilium operator running in HA mode requires the use of ResourceLock for Leader Election | ||||
| # between multiple running instances. | ||||
| # The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less | ||||
| # common and fewer objects in the cluster watch "all Leases". | ||||
| - apiGroups: | ||||
|   - coordination.k8s.io | ||||
|   resources: | ||||
|   - leases | ||||
|   verbs: | ||||
|   - create | ||||
|   - get | ||||
|   - update | ||||
| --- | ||||
| # Source: cilium/templates/cilium-agent/clusterrolebinding.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   name: cilium | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: cilium | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: "cilium" | ||||
|   namespace: kube-system | ||||
| --- | ||||
| # Source: cilium/templates/cilium-operator/clusterrolebinding.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   name: cilium-operator | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: cilium-operator | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: "cilium-operator" | ||||
|   namespace: kube-system | ||||
| --- | ||||
| # Source: cilium/templates/cilium-agent/service.yaml | ||||
| apiVersion: v1 | ||||
| kind: Service | ||||
| metadata: | ||||
|   name: cilium-agent | ||||
|   namespace: kube-system | ||||
|   annotations: | ||||
|     prometheus.io/scrape: "true" | ||||
|     prometheus.io/port: "9964" | ||||
|   labels: | ||||
|     k8s-app: cilium | ||||
| spec: | ||||
|   clusterIP: None | ||||
|   type: ClusterIP | ||||
|   selector: | ||||
|     k8s-app: cilium | ||||
|   ports: | ||||
|   - name: envoy-metrics | ||||
|     port: 9964 | ||||
|     protocol: TCP | ||||
|     targetPort: envoy-metrics | ||||
| --- | ||||
| # Source: cilium/templates/cilium-agent/daemonset.yaml | ||||
| apiVersion: apps/v1 | ||||
| kind: DaemonSet | ||||
| metadata: | ||||
|   name: cilium | ||||
|   namespace: kube-system | ||||
|   labels: | ||||
|     k8s-app: cilium | ||||
| spec: | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       k8s-app: cilium | ||||
|   updateStrategy: | ||||
|     rollingUpdate: | ||||
|       maxUnavailable: 2 | ||||
|     type: RollingUpdate | ||||
|   template: | ||||
|     metadata: | ||||
|       annotations: | ||||
|         prometheus.io/port: "9962" | ||||
|         prometheus.io/scrape: "true" | ||||
|       labels: | ||||
|         k8s-app: cilium | ||||
|     spec: | ||||
|       containers: | ||||
|       - name: cilium-agent | ||||
|         image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61" | ||||
|         imagePullPolicy: IfNotPresent | ||||
|         command: | ||||
|         - cilium-agent | ||||
|         args: | ||||
|         - --config-dir=/tmp/cilium/config-map | ||||
|         startupProbe: | ||||
|           httpGet: | ||||
|             host: "127.0.0.1" | ||||
|             path: /healthz | ||||
|             port: 9879 | ||||
|             scheme: HTTP | ||||
|             httpHeaders: | ||||
|             - name: "brief" | ||||
|               value: "true" | ||||
|           failureThreshold: 105 | ||||
|           periodSeconds: 2 | ||||
|           successThreshold: 1 | ||||
|         livenessProbe: | ||||
|           httpGet: | ||||
|             host: "127.0.0.1" | ||||
|             path: /healthz | ||||
|             port: 9879 | ||||
|             scheme: HTTP | ||||
|             httpHeaders: | ||||
|             - name: "brief" | ||||
|               value: "true" | ||||
|           periodSeconds: 30 | ||||
|           successThreshold: 1 | ||||
|           failureThreshold: 10 | ||||
|           timeoutSeconds: 5 | ||||
|         readinessProbe: | ||||
|           httpGet: | ||||
|             host: "127.0.0.1" | ||||
|             path: /healthz | ||||
|             port: 9879 | ||||
|             scheme: HTTP | ||||
|             httpHeaders: | ||||
|             - name: "brief" | ||||
|               value: "true" | ||||
|           periodSeconds: 30 | ||||
|           successThreshold: 1 | ||||
|           failureThreshold: 3 | ||||
|           timeoutSeconds: 5 | ||||
|         env: | ||||
|         - name: K8S_NODE_NAME | ||||
|           valueFrom: | ||||
|             fieldRef: | ||||
|               apiVersion: v1 | ||||
|               fieldPath: spec.nodeName | ||||
|         - name: CILIUM_K8S_NAMESPACE | ||||
|           valueFrom: | ||||
|             fieldRef: | ||||
|               apiVersion: v1 | ||||
|               fieldPath: metadata.namespace | ||||
|         - name: CILIUM_CLUSTERMESH_CONFIG | ||||
|           value: /var/lib/cilium/clustermesh/ | ||||
|         - name: CILIUM_CNI_CHAINING_MODE | ||||
|           valueFrom: | ||||
|             configMapKeyRef: | ||||
|               name: cilium-config | ||||
|               key: cni-chaining-mode | ||||
|               optional: true | ||||
|         - name: CILIUM_CUSTOM_CNI_CONF | ||||
|           valueFrom: | ||||
|             configMapKeyRef: | ||||
|               name: cilium-config | ||||
|               key: custom-cni-conf | ||||
|               optional: true | ||||
|         - name: KUBERNETES_SERVICE_HOST | ||||
|           value: "api.cluster.local" | ||||
|         - name: KUBERNETES_SERVICE_PORT | ||||
|           value: "6443" | ||||
|         lifecycle: | ||||
|           postStart: | ||||
|             exec: | ||||
|               command: | ||||
|               - "/cni-install.sh" | ||||
|               - "--enable-debug=false" | ||||
|               - "--cni-exclusive=true" | ||||
|               - "--log-file=/var/run/cilium/cilium-cni.log" | ||||
|           preStop: | ||||
|             exec: | ||||
|               command: | ||||
|               - /cni-uninstall.sh | ||||
|         resources: | ||||
|           limits: | ||||
|             cpu: 2 | ||||
|             memory: 1Gi | ||||
|           requests: | ||||
|             cpu: 100m | ||||
|             memory: 128Mi | ||||
|         ports: | ||||
|         - name: peer-service | ||||
|           containerPort: 4244 | ||||
|           hostPort: 4244 | ||||
|           protocol: TCP | ||||
|         - name: prometheus | ||||
|           containerPort: 9962 | ||||
|           hostPort: 9962 | ||||
|           protocol: TCP | ||||
|         - name: envoy-metrics | ||||
|           containerPort: 9964 | ||||
|           hostPort: 9964 | ||||
|           protocol: TCP | ||||
|         securityContext: | ||||
|           privileged: true | ||||
|         terminationMessagePolicy: FallbackToLogsOnError | ||||
|         volumeMounts: | ||||
|         - name: bpf-maps | ||||
|           mountPath: /sys/fs/bpf | ||||
|           mountPropagation: Bidirectional | ||||
|         # Check for duplicate mounts before mounting | ||||
|         - name: cilium-cgroup | ||||
|           mountPath: /sys/fs/cgroup | ||||
|         - name: cilium-run | ||||
|           mountPath: /var/run/cilium | ||||
|         - name: cni-path | ||||
|           mountPath: /host/opt/cni/bin | ||||
|         - name: etc-cni-netd | ||||
|           mountPath: /host/etc/cni/net.d | ||||
|         - name: clustermesh-secrets | ||||
|           mountPath: /var/lib/cilium/clustermesh | ||||
|           readOnly: true | ||||
|         - name: cilium-config-path | ||||
|           mountPath: /tmp/cilium/config-map | ||||
|           readOnly: true | ||||
|           # Needed to be able to load kernel modules | ||||
|         - name: lib-modules | ||||
|           mountPath: /lib/modules | ||||
|           readOnly: true | ||||
|         - name: xtables-lock | ||||
|           mountPath: /run/xtables.lock | ||||
|       initContainers: | ||||
|       - name: clean-cilium-state | ||||
|         image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61" | ||||
|         imagePullPolicy: IfNotPresent | ||||
|         command: | ||||
|         - /init-container.sh | ||||
|         env: | ||||
|         - name: CILIUM_ALL_STATE | ||||
|           valueFrom: | ||||
|             configMapKeyRef: | ||||
|               name: cilium-config | ||||
|               key: clean-cilium-state | ||||
|               optional: true | ||||
|         - name: CILIUM_BPF_STATE | ||||
|           valueFrom: | ||||
|             configMapKeyRef: | ||||
|               name: cilium-config | ||||
|               key: clean-cilium-bpf-state | ||||
|               optional: true | ||||
|         - name: KUBERNETES_SERVICE_HOST | ||||
|           value: "api.cluster.local" | ||||
|         - name: KUBERNETES_SERVICE_PORT | ||||
|           value: "6443" | ||||
|         terminationMessagePolicy: FallbackToLogsOnError | ||||
|         securityContext: | ||||
|           privileged: true | ||||
|         volumeMounts: | ||||
|         - name: bpf-maps | ||||
|           mountPath: /sys/fs/bpf | ||||
|           # Required to mount cgroup filesystem from the host to cilium agent pod | ||||
|         - name: cilium-cgroup | ||||
|           mountPath: /sys/fs/cgroup | ||||
|           mountPropagation: HostToContainer | ||||
|         - name: cilium-run | ||||
|           mountPath: /var/run/cilium | ||||
|         resources: | ||||
|           requests: | ||||
|             cpu: 100m | ||||
|             memory: 100Mi # wait-for-kube-proxy | ||||
|       restartPolicy: Always | ||||
|       priorityClassName: system-node-critical | ||||
|       serviceAccount: "cilium" | ||||
|       serviceAccountName: "cilium" | ||||
|       terminationGracePeriodSeconds: 1 | ||||
|       hostNetwork: true | ||||
|       affinity: | ||||
|         podAntiAffinity: | ||||
|           requiredDuringSchedulingIgnoredDuringExecution: | ||||
|           - labelSelector: | ||||
|               matchLabels: | ||||
|                 k8s-app: cilium | ||||
|             topologyKey: kubernetes.io/hostname | ||||
|       nodeSelector: | ||||
|         kubernetes.io/os: linux | ||||
|       tolerations: | ||||
|         - operator: Exists | ||||
|       volumes: | ||||
|         # To keep state between restarts / upgrades | ||||
|       - name: cilium-run | ||||
|         hostPath: | ||||
|           path: /var/run/cilium | ||||
|           type: DirectoryOrCreate | ||||
|         # To keep state between restarts / upgrades for bpf maps | ||||
|       - name: bpf-maps | ||||
|         hostPath: | ||||
|           path: /sys/fs/bpf | ||||
|           type: DirectoryOrCreate | ||||
|       # To keep state between restarts / upgrades for cgroup2 filesystem | ||||
|       - name: cilium-cgroup | ||||
|         hostPath: | ||||
|           path: /sys/fs/cgroup | ||||
|           type: DirectoryOrCreate | ||||
|       # To install cilium cni plugin in the host | ||||
|       - name: cni-path | ||||
|         hostPath: | ||||
|           path:  /opt/cni/bin | ||||
|           type: DirectoryOrCreate | ||||
|         # To install cilium cni configuration in the host | ||||
|       - name: etc-cni-netd | ||||
|         hostPath: | ||||
|           path: /etc/cni/net.d | ||||
|           type: DirectoryOrCreate | ||||
|         # To be able to load kernel modules | ||||
|       - name: lib-modules | ||||
|         hostPath: | ||||
|           path: /lib/modules | ||||
|         # To access iptables concurrently with other processes (e.g. kube-proxy) | ||||
|       - name: xtables-lock | ||||
|         hostPath: | ||||
|           path: /run/xtables.lock | ||||
|           type: FileOrCreate | ||||
|         # To read the clustermesh configuration | ||||
|       - name: clustermesh-secrets | ||||
|         secret: | ||||
|           secretName: cilium-clustermesh | ||||
|           # note: the leading zero means this number is in octal representation: do not remove it | ||||
|           defaultMode: 0400 | ||||
|           optional: true | ||||
|         # To read the configuration from the config map | ||||
|       - name: cilium-config-path | ||||
|         configMap: | ||||
|           name: cilium-config | ||||
| --- | ||||
| # Source: cilium/templates/cilium-operator/deployment.yaml | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   name: cilium-operator | ||||
|   namespace: kube-system | ||||
|   labels: | ||||
|     io.cilium/app: operator | ||||
|     name: cilium-operator | ||||
| spec: | ||||
|   # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go | ||||
|   # for more details. | ||||
|   replicas: 1 | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       io.cilium/app: operator | ||||
|       name: cilium-operator | ||||
|   strategy: | ||||
|     rollingUpdate: | ||||
|       maxSurge: 1 | ||||
|       maxUnavailable: 1 | ||||
|     type: RollingUpdate | ||||
|   template: | ||||
|     metadata: | ||||
|       annotations: | ||||
|         # ensure pods roll when configmap updates | ||||
|         cilium.io/cilium-configmap-checksum: "91a303965c397a04cb454230bd07a7565b12e96248534e33f52c8c2be5f59781" | ||||
|       labels: | ||||
|         io.cilium/app: operator | ||||
|         name: cilium-operator | ||||
|     spec: | ||||
|       containers: | ||||
|       - name: cilium-operator | ||||
|         image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72" | ||||
|         imagePullPolicy: IfNotPresent | ||||
|         command: | ||||
|         - cilium-operator-generic | ||||
|         args: | ||||
|         - --config-dir=/tmp/cilium/config-map | ||||
|         - --debug=$(CILIUM_DEBUG) | ||||
|         env: | ||||
|         - name: K8S_NODE_NAME | ||||
|           valueFrom: | ||||
|             fieldRef: | ||||
|               apiVersion: v1 | ||||
|               fieldPath: spec.nodeName | ||||
|         - name: CILIUM_K8S_NAMESPACE | ||||
|           valueFrom: | ||||
|             fieldRef: | ||||
|               apiVersion: v1 | ||||
|               fieldPath: metadata.namespace | ||||
|         - name: CILIUM_DEBUG | ||||
|           valueFrom: | ||||
|             configMapKeyRef: | ||||
|               key: debug | ||||
|               name: cilium-config | ||||
|               optional: true | ||||
|         - name: KUBERNETES_SERVICE_HOST | ||||
|           value: "api.cluster.local" | ||||
|         - name: KUBERNETES_SERVICE_PORT | ||||
|           value: "6443" | ||||
|         livenessProbe: | ||||
|           httpGet: | ||||
|             host: "127.0.0.1" | ||||
|             path: /healthz | ||||
|             port: 9234 | ||||
|             scheme: HTTP | ||||
|           initialDelaySeconds: 60 | ||||
|           periodSeconds: 10 | ||||
|           timeoutSeconds: 3 | ||||
|         volumeMounts: | ||||
|         - name: cilium-config-path | ||||
|           mountPath: /tmp/cilium/config-map | ||||
|           readOnly: true | ||||
|         terminationMessagePolicy: FallbackToLogsOnError | ||||
|       hostNetwork: true | ||||
|       restartPolicy: Always | ||||
|       priorityClassName: system-cluster-critical | ||||
|       serviceAccount: "cilium-operator" | ||||
|       serviceAccountName: "cilium-operator" | ||||
|       # In HA mode, cilium-operator pods must not be scheduled on the same | ||||
|       # node as they will clash with each other. | ||||
|       affinity: | ||||
|         podAntiAffinity: | ||||
|           requiredDuringSchedulingIgnoredDuringExecution: | ||||
|           - labelSelector: | ||||
|               matchLabels: | ||||
|                 io.cilium/app: operator | ||||
|             topologyKey: kubernetes.io/hostname | ||||
|       nodeSelector: | ||||
|         kubernetes.io/os: linux | ||||
|         node-role.kubernetes.io/control-plane: "" | ||||
|       tolerations: | ||||
|         - effect: NoSchedule | ||||
|           operator: Exists | ||||
|       volumes: | ||||
|         # To read the configuration from the config map | ||||
|       - name: cilium-config-path | ||||
|         configMap: | ||||
|           name: cilium-config | ||||
| @@ -1,79 +0,0 @@ | ||||
| --- | ||||
|  | ||||
| k8sServiceHost: "api.cluster.local" | ||||
| k8sServicePort: "6443" | ||||
|  | ||||
| operator: | ||||
|   enabled: true | ||||
|   rollOutPods: true | ||||
|   replicas: 1 | ||||
|   prometheus: | ||||
|     enabled: false | ||||
|   nodeSelector: | ||||
|     node-role.kubernetes.io/control-plane: "" | ||||
|   tolerations: | ||||
|     - operator: Exists | ||||
|       effect: NoSchedule | ||||
|  | ||||
| identityAllocationMode: crd | ||||
| kubeProxyReplacement: strict | ||||
| enableK8sEndpointSlice: true | ||||
| localRedirectPolicy: true | ||||
|  | ||||
| tunnel: "vxlan" | ||||
| autoDirectNodeRoutes: false | ||||
| devices: [eth+,wg+] | ||||
| extraConfig: | ||||
|   mtu: "1420" | ||||
|  | ||||
| healthChecking: true | ||||
|  | ||||
| cni: | ||||
|   install: true | ||||
|  | ||||
| ipam: | ||||
|   mode: "kubernetes" | ||||
| k8s: | ||||
|   requireIPv4PodCIDR: true | ||||
|   requireIPv6PodCIDR: true | ||||
|  | ||||
| bpf: | ||||
|   masquerade: false | ||||
| ipv4: | ||||
|   enabled: true | ||||
| ipv6: | ||||
|   enabled: true | ||||
| hostServices: | ||||
|   enabled: true | ||||
| hostPort: | ||||
|   enabled: true | ||||
| nodePort: | ||||
|   enabled: true | ||||
| externalIPs: | ||||
|   enabled: true | ||||
| hostFirewall: | ||||
|   enabled: true | ||||
| ingressController: | ||||
|   enabled: false | ||||
|  | ||||
| securityContext: | ||||
|   privileged: true | ||||
|  | ||||
| hubble: | ||||
|   enabled: false | ||||
|  | ||||
| prometheus: | ||||
|   enabled: true | ||||
|  | ||||
| cgroup: | ||||
|   autoMount: | ||||
|     enabled: false | ||||
|   hostRoot: /sys/fs/cgroup | ||||
|  | ||||
| resources: | ||||
|   limits: | ||||
|     cpu: 2 | ||||
|     memory: 1Gi | ||||
|   requests: | ||||
|     cpu: 100m | ||||
|     memory: 128Mi | ||||
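
These values render through the helm template call in the Makefile. Once the result
is applied, the strict kube-proxy replacement and the eth+/wg+ device selection can
be confirmed from a running agent; an optional check, assuming the DaemonSet is up:

    # inspect KubeProxyReplacement and Devices lines from the agent status
    kubectl -n kube-system exec ds/cilium -c cilium-agent -- \
        cilium status --verbose | grep -iE 'kubeproxy|devices'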
| @@ -1,153 +0,0 @@ | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: ConfigMap | ||||
| metadata: | ||||
|   name: coredns-local | ||||
|   namespace: kube-system | ||||
| data: | ||||
|   empty.db: | | ||||
|     @       60      IN      SOA     localnet. root.localnet. ( | ||||
|                     1       ; serial | ||||
|                     60      ; refresh | ||||
|                     60      ; retry | ||||
|                     60      ; expiry | ||||
|                     60 )    ; minimum | ||||
|     ; | ||||
|     @       IN      NS      localnet. | ||||
|  | ||||
|   hosts: | | ||||
|     # static hosts | ||||
|     169.254.2.53        dns.local | ||||
|  | ||||
|   Corefile.local: | | ||||
|     (empty) { | ||||
|         file /etc/coredns/empty.db | ||||
|     } | ||||
|  | ||||
|     .:53 { | ||||
|         errors | ||||
|         bind 169.254.2.53 | ||||
|  | ||||
|         health 127.0.0.1:8091 { | ||||
|            lameduck 5s | ||||
|         } | ||||
|  | ||||
|         hosts /etc/coredns/hosts { | ||||
|             reload 60s | ||||
|             fallthrough | ||||
|         } | ||||
|  | ||||
|         kubernetes cluster.local in-addr.arpa ip6.arpa { | ||||
|             endpoint https://api.cluster.local:6443 | ||||
|             kubeconfig /etc/coredns/kubeconfig.conf coredns | ||||
|             pods insecure | ||||
|             ttl 60 | ||||
|         } | ||||
|         prometheus :9153 | ||||
|  | ||||
|         forward . /etc/resolv.conf { | ||||
|             policy sequential | ||||
|             expire 30s | ||||
|         } | ||||
|  | ||||
|         cache 300 | ||||
|         loop | ||||
|         reload | ||||
|         loadbalance | ||||
|     } | ||||
|   kubeconfig.conf: |- | ||||
|     apiVersion: v1 | ||||
|     kind: Config | ||||
|     clusters: | ||||
|     - cluster: | ||||
|         certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt | ||||
|         server: https://api.cluster.local:6443 | ||||
|       name: default | ||||
|     contexts: | ||||
|     - context: | ||||
|         cluster: default | ||||
|         namespace: kube-system | ||||
|         user: coredns | ||||
|       name: coredns | ||||
|     current-context: coredns | ||||
|     users: | ||||
|     - name: coredns | ||||
|       user: | ||||
|         tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token | ||||
| --- | ||||
| apiVersion: apps/v1 | ||||
| kind: DaemonSet | ||||
| metadata: | ||||
|   name: coredns-local | ||||
|   namespace: kube-system | ||||
|   labels: | ||||
|     k8s-app: kube-dns-local | ||||
|     kubernetes.io/name: CoreDNS | ||||
| spec: | ||||
|   updateStrategy: | ||||
|     type: RollingUpdate | ||||
|   minReadySeconds: 15 | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       k8s-app: kube-dns-local | ||||
|       kubernetes.io/name: CoreDNS | ||||
|   template: | ||||
|     metadata: | ||||
|       labels: | ||||
|         k8s-app: kube-dns-local | ||||
|         kubernetes.io/name: CoreDNS | ||||
|       annotations: | ||||
|         prometheus.io/scrape: "true" | ||||
|         prometheus.io/port: "9153" | ||||
|     spec: | ||||
|       priorityClassName: system-node-critical | ||||
|       serviceAccount: coredns | ||||
|       serviceAccountName: coredns | ||||
|       enableServiceLinks: false | ||||
|       tolerations: | ||||
|       - effect: NoSchedule | ||||
|         key: node-role.kubernetes.io/control-plane | ||||
|         operator: Exists | ||||
|       - effect: NoSchedule | ||||
|         key: node.cloudprovider.kubernetes.io/uninitialized | ||||
|         operator: Exists | ||||
|       hostNetwork: true | ||||
|       containers: | ||||
|       - name: coredns | ||||
|         image: coredns/coredns:1.9.4 | ||||
|         imagePullPolicy: IfNotPresent | ||||
|         resources: | ||||
|           limits: | ||||
|             cpu: 100m | ||||
|             memory: 128Mi | ||||
|           requests: | ||||
|             cpu: 50m | ||||
|             memory: 64Mi | ||||
|         args: [ "-conf", "/etc/coredns/Corefile.local" ] | ||||
|         volumeMounts: | ||||
|         - name: config-volume | ||||
|           mountPath: /etc/coredns | ||||
|           readOnly: true | ||||
|         livenessProbe: | ||||
|           httpGet: | ||||
|             host: 127.0.0.1 | ||||
|             path: /health | ||||
|             port: 8091 | ||||
|             scheme: HTTP | ||||
|           initialDelaySeconds: 60 | ||||
|           periodSeconds: 10 | ||||
|           successThreshold: 1 | ||||
|           timeoutSeconds: 5 | ||||
|         securityContext: | ||||
|           allowPrivilegeEscalation: false | ||||
|           capabilities: | ||||
|             add: | ||||
|             - NET_BIND_SERVICE | ||||
|             drop: | ||||
|             - all | ||||
|           readOnlyRootFilesystem: true | ||||
|       dnsPolicy: Default | ||||
|       volumes: | ||||
|         - name: config-volume | ||||
|           configMap: | ||||
|             name: coredns-local | ||||
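
The DaemonSet publishes a node-local resolver on 169.254.2.53 (the bind address in
Corefile.local), with dns.local as a static entry pointing back at it. From any node
it can be probed directly, assuming dig is available:

    # expect 169.254.2.53 back for the static host, and a ClusterIP for the API
    dig +short @169.254.2.53 dns.local
    dig +short @169.254.2.53 kubernetes.default.svc.cluster.local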
| @@ -1,4 +0,0 @@ | ||||
| apiVersion: v1 | ||||
| kind: Namespace | ||||
| metadata: | ||||
|   name: ingress-nginx | ||||
| @@ -1,463 +0,0 @@ | ||||
| --- | ||||
| # Source: ingress-nginx/templates/controller-serviceaccount.yaml | ||||
| apiVersion: v1 | ||||
| kind: ServiceAccount | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx | ||||
|   namespace: ingress-nginx | ||||
| automountServiceAccountToken: true | ||||
| --- | ||||
| # Source: ingress-nginx/templates/controller-configmap.yaml | ||||
| apiVersion: v1 | ||||
| kind: ConfigMap | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx-controller | ||||
|   namespace: ingress-nginx | ||||
| data: | ||||
|   allow-snippet-annotations: "true" | ||||
|   client-body-timeout: "30" | ||||
|   client-header-timeout: "30" | ||||
|   enable-access-log-for-default-backend: "true" | ||||
|   error-log-level: "error" | ||||
|   hsts: "true" | ||||
|   hsts-include-subdomains: "true" | ||||
|   hsts-max-age: "31536000" | ||||
|   hsts-preload: "true" | ||||
|   http-redirect-code: "301" | ||||
|   limit-req-status-code: "429" | ||||
|   log-format-escape-json: "true" | ||||
|   log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}" | ||||
|   proxy-connect-timeout: "10" | ||||
|   proxy-headers-hash-bucket-size: "128" | ||||
|   proxy-hide-headers: "strict-transport-security" | ||||
|   proxy-read-timeout: "60" | ||||
|   proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12" | ||||
|   proxy-send-timeout: "60" | ||||
|   server-name-hash-bucket-size: "64" | ||||
|   server-name-hash-max-size: "512" | ||||
|   server-tokens: "false" | ||||
|   ssl-protocols: "TLSv1.3" | ||||
|   upstream-keepalive-connections: "32" | ||||
|   use-forwarded-headers: "true" | ||||
|   use-geoip: "false" | ||||
|   use-geoip2: "false" | ||||
|   use-gzip: "true" | ||||
|   worker-cpu-affinity: "auto" | ||||
|   worker-processes: "auto" | ||||
| --- | ||||
| # Source: ingress-nginx/templates/clusterrole.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|   name: ingress-nginx | ||||
| rules: | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - configmaps | ||||
|       - endpoints | ||||
|       - nodes | ||||
|       - pods | ||||
|       - secrets | ||||
|       - namespaces | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - coordination.k8s.io | ||||
|     resources: | ||||
|       - leases | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - nodes | ||||
|     verbs: | ||||
|       - get | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - services | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - events | ||||
|     verbs: | ||||
|       - create | ||||
|       - patch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses/status | ||||
|     verbs: | ||||
|       - update | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingressclasses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - discovery.k8s.io | ||||
|     resources: | ||||
|       - endpointslices | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|       - get | ||||
| --- | ||||
| # Source: ingress-nginx/templates/clusterrolebinding.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|   name: ingress-nginx | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: ingress-nginx | ||||
| subjects: | ||||
|   - kind: ServiceAccount | ||||
|     name: ingress-nginx | ||||
|     namespace: "ingress-nginx" | ||||
| --- | ||||
| # Source: ingress-nginx/templates/controller-role.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: Role | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx | ||||
|   namespace: ingress-nginx | ||||
| rules: | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - namespaces | ||||
|     verbs: | ||||
|       - get | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - configmaps | ||||
|       - pods | ||||
|       - secrets | ||||
|       - endpoints | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - services | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses/status | ||||
|     verbs: | ||||
|       - update | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingressclasses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - coordination.k8s.io | ||||
|     resources: | ||||
|       - leases | ||||
|     resourceNames: | ||||
|       - ingress-nginx-leader | ||||
|     verbs: | ||||
|       - get | ||||
|       - update | ||||
|   - apiGroups: | ||||
|       - coordination.k8s.io | ||||
|     resources: | ||||
|       - leases | ||||
|     verbs: | ||||
|       - create | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - events | ||||
|     verbs: | ||||
|       - create | ||||
|       - patch | ||||
|   - apiGroups: | ||||
|       - discovery.k8s.io | ||||
|     resources: | ||||
|       - endpointslices | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|       - get | ||||
| --- | ||||
| # Source: ingress-nginx/templates/controller-rolebinding.yaml | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: RoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx | ||||
|   namespace: ingress-nginx | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: Role | ||||
|   name: ingress-nginx | ||||
| subjects: | ||||
|   - kind: ServiceAccount | ||||
|     name: ingress-nginx | ||||
|     namespace: "ingress-nginx" | ||||
| --- | ||||
| # Source: ingress-nginx/templates/controller-service.yaml | ||||
| apiVersion: v1 | ||||
| kind: Service | ||||
| metadata: | ||||
|   annotations: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx-controller | ||||
|   namespace: ingress-nginx | ||||
| spec: | ||||
|   type: ClusterIP | ||||
|   clusterIP: None | ||||
|   ipFamilyPolicy: RequireDualStack | ||||
|   ipFamilies:  | ||||
|     - IPv4 | ||||
|     - IPv6 | ||||
|   ports: | ||||
|     - name: http | ||||
|       port: 80 | ||||
|       protocol: TCP | ||||
|       targetPort: http | ||||
|       appProtocol: http | ||||
|     - name: https | ||||
|       port: 443 | ||||
|       protocol: TCP | ||||
|       targetPort: https | ||||
|       appProtocol: https | ||||
|   selector: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
| --- | ||||
| # Source: ingress-nginx/templates/controller-daemonset.yaml | ||||
| apiVersion: apps/v1 | ||||
| kind: DaemonSet | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx-controller | ||||
|   namespace: ingress-nginx | ||||
| spec: | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       app.kubernetes.io/name: ingress-nginx | ||||
|       app.kubernetes.io/instance: ingress-nginx | ||||
|       app.kubernetes.io/component: controller | ||||
|   revisionHistoryLimit: 2 | ||||
|   updateStrategy:  | ||||
|     rollingUpdate: | ||||
|       maxUnavailable: 1 | ||||
|     type: RollingUpdate | ||||
|   minReadySeconds: 15 | ||||
|   template: | ||||
|     metadata: | ||||
|       annotations: | ||||
|         prometheus.io/port: "10254" | ||||
|         prometheus.io/scrape: "true" | ||||
|       labels: | ||||
|         app.kubernetes.io/name: ingress-nginx | ||||
|         app.kubernetes.io/instance: ingress-nginx | ||||
|         app.kubernetes.io/component: controller | ||||
|     spec: | ||||
|       dnsPolicy: ClusterFirstWithHostNet | ||||
|       containers: | ||||
|         - name: controller | ||||
|           image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629" | ||||
|           imagePullPolicy: IfNotPresent | ||||
|           lifecycle:  | ||||
|             preStop: | ||||
|               exec: | ||||
|                 command: | ||||
|                 - /wait-shutdown | ||||
|           args: | ||||
|             - /nginx-ingress-controller | ||||
|             - --election-id=ingress-nginx-leader | ||||
|             - --controller-class=k8s.io/ingress-nginx | ||||
|             - --ingress-class=nginx | ||||
|             - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller | ||||
|           securityContext:  | ||||
|             capabilities: | ||||
|               drop: | ||||
|               - ALL | ||||
|               add: | ||||
|               - NET_BIND_SERVICE | ||||
|             runAsUser: 101 | ||||
|             allowPrivilegeEscalation: true | ||||
|           env: | ||||
|             - name: POD_NAME | ||||
|               valueFrom: | ||||
|                 fieldRef: | ||||
|                   fieldPath: metadata.name | ||||
|             - name: POD_NAMESPACE | ||||
|               valueFrom: | ||||
|                 fieldRef: | ||||
|                   fieldPath: metadata.namespace | ||||
|             - name: LD_PRELOAD | ||||
|               value: /usr/local/lib/libmimalloc.so | ||||
|             - name: NODE_NAME | ||||
|               valueFrom: | ||||
|                 fieldRef: | ||||
|                   fieldPath: metadata.name | ||||
|           livenessProbe:  | ||||
|             failureThreshold: 5 | ||||
|             httpGet: | ||||
|               path: /healthz | ||||
|               port: 10254 | ||||
|               scheme: HTTP | ||||
|             initialDelaySeconds: 15 | ||||
|             periodSeconds: 30 | ||||
|             successThreshold: 1 | ||||
|             timeoutSeconds: 1 | ||||
|           readinessProbe:  | ||||
|             failureThreshold: 3 | ||||
|             httpGet: | ||||
|               path: /healthz | ||||
|               port: 10254 | ||||
|               scheme: HTTP | ||||
|             initialDelaySeconds: 10 | ||||
|             periodSeconds: 30 | ||||
|             successThreshold: 1 | ||||
|             timeoutSeconds: 1 | ||||
|           ports: | ||||
|             - name: http | ||||
|               containerPort: 80 | ||||
|               protocol: TCP | ||||
|             - name: https | ||||
|               containerPort: 443 | ||||
|               protocol: TCP | ||||
|           resources:  | ||||
|             limits: | ||||
|               cpu: 1 | ||||
|               memory: 1Gi | ||||
|             requests: | ||||
|               cpu: 100m | ||||
|               memory: 128Mi | ||||
|       hostNetwork: true | ||||
|       nodeSelector:  | ||||
|         kubernetes.io/os: linux | ||||
|       affinity:  | ||||
|         nodeAffinity: | ||||
|           requiredDuringSchedulingIgnoredDuringExecution: | ||||
|             nodeSelectorTerms: | ||||
|             - matchExpressions: | ||||
|               - key: project.io/node-pool | ||||
|                 operator: In | ||||
|                 values: | ||||
|                 - web | ||||
|       serviceAccountName: ingress-nginx | ||||
|       terminationGracePeriodSeconds: 300 | ||||
| --- | ||||
| # Source: ingress-nginx/templates/controller-ingressclass.yaml | ||||
| # We don't support namespaced ingressClass yet | ||||
| # So a ClusterRole and a ClusterRoleBinding are required | ||||
| apiVersion: networking.k8s.io/v1 | ||||
| kind: IngressClass | ||||
| metadata: | ||||
|   labels: | ||||
|     helm.sh/chart: ingress-nginx-4.4.2 | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/version: "1.5.1" | ||||
|     app.kubernetes.io/part-of: ingress-nginx | ||||
|     app.kubernetes.io/managed-by: Helm | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: nginx | ||||
| spec: | ||||
|   controller: k8s.io/ingress-nginx | ||||
| @@ -1,116 +0,0 @@ | ||||
|  | ||||
| controller: | ||||
|   kind: DaemonSet | ||||
|  | ||||
|   hostNetwork: true | ||||
|   hostPort: | ||||
|     enabled: false | ||||
|     ports: | ||||
|       http: 80 | ||||
|       https: 443 | ||||
|  | ||||
|   dnsPolicy: ClusterFirstWithHostNet | ||||
|  | ||||
|   updateStrategy: | ||||
|     rollingUpdate: | ||||
|       maxUnavailable: 1 | ||||
|     type: RollingUpdate | ||||
|  | ||||
|   publishService: | ||||
|     enabled: false | ||||
|  | ||||
|   config: | ||||
|     worker-processes: "auto" | ||||
|     worker-cpu-affinity: "auto" | ||||
|     error-log-level: "error" | ||||
|  | ||||
|     server-tokens: "false" | ||||
|     http-redirect-code: "301" | ||||
|  | ||||
|     use-gzip: "true" | ||||
|     use-geoip: "false" | ||||
|     use-geoip2: "false" | ||||
|  | ||||
|     use-forwarded-headers: "true" | ||||
|     # curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ',' | ||||
|     proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12" | ||||
|  | ||||
|     enable-access-log-for-default-backend: "true" | ||||
|     log-format-escape-json: "true" | ||||
|     log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}' | ||||
|  | ||||
|     upstream-keepalive-connections: "32" | ||||
|     proxy-connect-timeout: "10" | ||||
|     proxy-read-timeout: "60" | ||||
|     proxy-send-timeout: "60" | ||||
|  | ||||
|     ssl-protocols: "TLSv1.3" | ||||
|     hsts: "true" | ||||
|     hsts-max-age: "31536000" | ||||
|     hsts-include-subdomains: "true" | ||||
|     hsts-preload: "true" | ||||
|     proxy-hide-headers: "strict-transport-security" | ||||
|     proxy-headers-hash-bucket-size: "128" | ||||
|  | ||||
|     server-name-hash-bucket-size: "64" | ||||
|     server-name-hash-max-size: "512" | ||||
|  | ||||
|     limit-req-status-code: "429" | ||||
|  | ||||
|     client-header-timeout: "30" | ||||
|     client-body-timeout: "30" | ||||
|  | ||||
|   minReadySeconds: 15 | ||||
|  | ||||
|   podAnnotations: | ||||
|     prometheus.io/scrape: "true" | ||||
|     prometheus.io/port: "10254" | ||||
|  | ||||
|   extraEnvs: | ||||
|     - name: NODE_NAME | ||||
|       valueFrom: | ||||
|         fieldRef: | ||||
|           fieldPath: metadata.name | ||||
|  | ||||
|   livenessProbe: | ||||
|     initialDelaySeconds: 15 | ||||
|     periodSeconds: 30 | ||||
|   readinessProbe: | ||||
|     periodSeconds: 30 | ||||
|  | ||||
|   resources: | ||||
|     limits: | ||||
|       cpu: 1 | ||||
|       memory: 1Gi | ||||
|     requests: | ||||
|       cpu: 100m | ||||
|       memory: 128Mi | ||||
|  | ||||
|   affinity: | ||||
|     nodeAffinity: | ||||
|       requiredDuringSchedulingIgnoredDuringExecution: | ||||
|         nodeSelectorTerms: | ||||
|           - matchExpressions: | ||||
|               - key: project.io/node-pool | ||||
|                 operator: In | ||||
|                 values: | ||||
|                   - web | ||||
|  | ||||
|   service: | ||||
|     enabled: true | ||||
|     type: ClusterIP | ||||
|     clusterIP: None | ||||
|     ipFamilyPolicy: "RequireDualStack" | ||||
|     ipFamilies: | ||||
|       - IPv4 | ||||
|       - IPv6 | ||||
|  | ||||
|   admissionWebhooks: | ||||
|     enabled: false | ||||
|   metrics: | ||||
|     enabled: false | ||||
|  | ||||
| revisionHistoryLimit: 2 | ||||
|  | ||||
| defaultBackend: | ||||
|   enabled: false | ||||
| @@ -1,250 +0,0 @@ | ||||
| apiVersion: v1 | ||||
| kind: Namespace | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: kubelet-serving-cert-approver | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: ServiceAccount | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: kubelet-serving-cert-approver | ||||
|   namespace: kubelet-serving-cert-approver | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: certificates:kubelet-serving-cert-approver | ||||
| rules: | ||||
| - apiGroups: | ||||
|   - certificates.k8s.io | ||||
|   resources: | ||||
|   - certificatesigningrequests | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| - apiGroups: | ||||
|   - certificates.k8s.io | ||||
|   resources: | ||||
|   - certificatesigningrequests/approval | ||||
|   verbs: | ||||
|   - update | ||||
| - apiGroups: | ||||
|   - authorization.k8s.io | ||||
|   resources: | ||||
|   - subjectaccessreviews | ||||
|   verbs: | ||||
|   - create | ||||
| - apiGroups: | ||||
|   - certificates.k8s.io | ||||
|   resourceNames: | ||||
|   - kubernetes.io/kubelet-serving | ||||
|   resources: | ||||
|   - signers | ||||
|   verbs: | ||||
|   - approve | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: events:kubelet-serving-cert-approver | ||||
| rules: | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   - events | ||||
|   verbs: | ||||
|   - create | ||||
|   - patch | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: psp:kubelet-serving-cert-approver | ||||
| rules: | ||||
| - apiGroups: | ||||
|   - policy | ||||
|   resourceNames: | ||||
|   - kubelet-serving-cert-approver | ||||
|   resources: | ||||
|   - podsecuritypolicies | ||||
|   verbs: | ||||
|   - use | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: RoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: events:kubelet-serving-cert-approver | ||||
|   namespace: default | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: events:kubelet-serving-cert-approver | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: kubelet-serving-cert-approver | ||||
|   namespace: kubelet-serving-cert-approver | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: RoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: psp:kubelet-serving-cert-approver | ||||
|   namespace: kubelet-serving-cert-approver | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: psp:kubelet-serving-cert-approver | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: kubelet-serving-cert-approver | ||||
|   namespace: kubelet-serving-cert-approver | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: kubelet-serving-cert-approver | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: certificates:kubelet-serving-cert-approver | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: kubelet-serving-cert-approver | ||||
|   namespace: kubelet-serving-cert-approver | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: Service | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: kubelet-serving-cert-approver | ||||
|   namespace: kubelet-serving-cert-approver | ||||
| spec: | ||||
|   ports: | ||||
|   - name: metrics | ||||
|     port: 9090 | ||||
|     protocol: TCP | ||||
|     targetPort: metrics | ||||
|   selector: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
| --- | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|     app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   name: kubelet-serving-cert-approver | ||||
|   namespace: kubelet-serving-cert-approver | ||||
| spec: | ||||
|   replicas: 1 | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|       app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|   template: | ||||
|     metadata: | ||||
|       labels: | ||||
|         app.kubernetes.io/instance: kubelet-serving-cert-approver | ||||
|         app.kubernetes.io/name: kubelet-serving-cert-approver | ||||
|     spec: | ||||
|       tolerations: | ||||
|         - key: "node.cloudprovider.kubernetes.io/uninitialized" | ||||
|           value: "true" | ||||
|           effect: NoSchedule | ||||
|         - key: "CriticalAddonsOnly" | ||||
|           operator: Exists | ||||
|         - key: "node-role.kubernetes.io/master" | ||||
|           effect: NoSchedule | ||||
|       affinity: | ||||
|         nodeAffinity: | ||||
|           preferredDuringSchedulingIgnoredDuringExecution: | ||||
|           - preference: | ||||
|               matchExpressions: | ||||
|               - key: node-role.kubernetes.io/master | ||||
|                 operator: DoesNotExist | ||||
|               - key: node-role.kubernetes.io/control-plane | ||||
|                 operator: DoesNotExist | ||||
|             weight: 100 | ||||
|       containers: | ||||
|       - args: | ||||
|         - serve | ||||
|         env: | ||||
|         - name: NAMESPACE | ||||
|           valueFrom: | ||||
|             fieldRef: | ||||
|               fieldPath: metadata.namespace | ||||
|         image: ghcr.io/alex1989hu/kubelet-serving-cert-approver:main | ||||
|         imagePullPolicy: IfNotPresent | ||||
|         livenessProbe: | ||||
|           httpGet: | ||||
|             path: /healthz | ||||
|             port: health | ||||
|           initialDelaySeconds: 6 | ||||
|         name: cert-approver | ||||
|         ports: | ||||
|         - containerPort: 8080 | ||||
|           name: health | ||||
|         - containerPort: 9090 | ||||
|           name: metrics | ||||
|         readinessProbe: | ||||
|           httpGet: | ||||
|             path: /readyz | ||||
|             port: health | ||||
|           initialDelaySeconds: 3 | ||||
|         resources: | ||||
|           limits: | ||||
|             cpu: 250m | ||||
|             memory: 32Mi | ||||
|           requests: | ||||
|             cpu: 10m | ||||
|             memory: 16Mi | ||||
|         securityContext: | ||||
|           allowPrivilegeEscalation: false | ||||
|           capabilities: | ||||
|             drop: | ||||
|             - ALL | ||||
|           privileged: false | ||||
|           readOnlyRootFilesystem: true | ||||
|           runAsNonRoot: true | ||||
|       priorityClassName: system-cluster-critical | ||||
|       securityContext: | ||||
|         fsGroup: 65534 | ||||
|         runAsGroup: 65534 | ||||
|         runAsUser: 65534 | ||||
|       serviceAccountName: kubelet-serving-cert-approver | ||||
|       tolerations: | ||||
|       - effect: NoSchedule | ||||
|         key: node-role.kubernetes.io/master | ||||
|         operator: Exists | ||||
|       - effect: NoSchedule | ||||
|         key: node-role.kubernetes.io/control-plane | ||||
|         operator: Exists | ||||
|       - effect: NoSchedule | ||||
|         key: node.cloudprovider.kubernetes.io/uninitialized | ||||
|         operator: Exists | ||||
| @@ -1,140 +0,0 @@ | ||||
| apiVersion: v1 | ||||
| kind: Namespace | ||||
| metadata: | ||||
|   name: local-path-storage | ||||
|  | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: ServiceAccount | ||||
| metadata: | ||||
|   name: local-path-provisioner-service-account | ||||
|   namespace: local-path-storage | ||||
|  | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   name: local-path-provisioner-role | ||||
| rules: | ||||
|   - apiGroups: [ "" ] | ||||
|     resources: [ "nodes", "persistentvolumeclaims", "configmaps" ] | ||||
|     verbs: [ "get", "list", "watch" ] | ||||
|   - apiGroups: [ "" ] | ||||
|     resources: [ "endpoints", "persistentvolumes", "pods" ] | ||||
|     verbs: [ "*" ] | ||||
|   - apiGroups: [ "" ] | ||||
|     resources: [ "events" ] | ||||
|     verbs: [ "create", "patch" ] | ||||
|   - apiGroups: [ "storage.k8s.io" ] | ||||
|     resources: [ "storageclasses" ] | ||||
|     verbs: [ "get", "list", "watch" ] | ||||
|  | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   name: local-path-provisioner-bind | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: local-path-provisioner-role | ||||
| subjects: | ||||
|   - kind: ServiceAccount | ||||
|     name: local-path-provisioner-service-account | ||||
|     namespace: local-path-storage | ||||
|  | ||||
| --- | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   name: local-path-provisioner | ||||
|   namespace: local-path-storage | ||||
| spec: | ||||
|   replicas: 1 | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       app: local-path-provisioner | ||||
|   template: | ||||
|     metadata: | ||||
|       labels: | ||||
|         app: local-path-provisioner | ||||
|     spec: | ||||
|       nodeSelector: | ||||
|         node-role.kubernetes.io/control-plane: "" | ||||
|       tolerations: | ||||
|         - key: "node-role.kubernetes.io/control-plane" | ||||
|           effect: NoSchedule | ||||
|       serviceAccountName: local-path-provisioner-service-account | ||||
|       containers: | ||||
|         - name: local-path-provisioner | ||||
|           image: rancher/local-path-provisioner:v0.0.23 | ||||
|           imagePullPolicy: IfNotPresent | ||||
|           command: | ||||
|             - local-path-provisioner | ||||
|             - --debug | ||||
|             - start | ||||
|             - --config | ||||
|             - /etc/config/config.json | ||||
|           volumeMounts: | ||||
|             - name: config-volume | ||||
|               mountPath: /etc/config/ | ||||
|           env: | ||||
|             - name: POD_NAMESPACE | ||||
|               valueFrom: | ||||
|                 fieldRef: | ||||
|                   fieldPath: metadata.namespace | ||||
|       volumes: | ||||
|         - name: config-volume | ||||
|           configMap: | ||||
|             name: local-path-config | ||||
|  | ||||
| --- | ||||
| apiVersion: storage.k8s.io/v1 | ||||
| kind: StorageClass | ||||
| metadata: | ||||
|   name: local-path | ||||
|   annotations: | ||||
|     storageclass.kubernetes.io/is-default-class: "true" | ||||
| provisioner: rancher.io/local-path | ||||
| volumeBindingMode: WaitForFirstConsumer | ||||
| reclaimPolicy: Delete | ||||
|  | ||||
| --- | ||||
| kind: ConfigMap | ||||
| apiVersion: v1 | ||||
| metadata: | ||||
|   name: local-path-config | ||||
|   namespace: local-path-storage | ||||
| data: | ||||
|   config.json: |- | ||||
|     { | ||||
|             "nodePathMap":[ | ||||
|             { | ||||
|                     "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", | ||||
|                     "paths":["/var/data"] | ||||
|             } | ||||
|             ] | ||||
|     } | ||||
|   setup: |- | ||||
|     #!/bin/sh | ||||
|     set -eu | ||||
|     mkdir -m 0777 -p "$VOL_DIR" | ||||
|   teardown: |- | ||||
|     #!/bin/sh | ||||
|     set -eu | ||||
|     rm -rf "$VOL_DIR" | ||||
|   helperPod.yaml: |- | ||||
|     apiVersion: v1 | ||||
|     kind: Pod | ||||
|     metadata: | ||||
|       name: helper-pod | ||||
|     spec: | ||||
|       priorityClassName: system-node-critical | ||||
|       tolerations: | ||||
|       - key: node.kubernetes.io/disk-pressure | ||||
|         operator: Exists | ||||
|         effect: NoSchedule | ||||
|       containers: | ||||
|       - name: helper-pod | ||||
|         image: busybox | ||||
|         imagePullPolicy: IfNotPresent | ||||
| @@ -1,197 +0,0 @@ | ||||
| apiVersion: v1 | ||||
| kind: ServiceAccount | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: metrics-server | ||||
|   namespace: kube-system | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|     rbac.authorization.k8s.io/aggregate-to-admin: "true" | ||||
|     rbac.authorization.k8s.io/aggregate-to-edit: "true" | ||||
|     rbac.authorization.k8s.io/aggregate-to-view: "true" | ||||
|   name: system:aggregated-metrics-reader | ||||
| rules: | ||||
| - apiGroups: | ||||
|   - metrics.k8s.io | ||||
|   resources: | ||||
|   - pods | ||||
|   - nodes | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: system:metrics-server | ||||
| rules: | ||||
| - apiGroups: | ||||
|   - "" | ||||
|   resources: | ||||
|   - pods | ||||
|   - nodes | ||||
|   - nodes/stats | ||||
|   - namespaces | ||||
|   - configmaps | ||||
|   verbs: | ||||
|   - get | ||||
|   - list | ||||
|   - watch | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: RoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: metrics-server-auth-reader | ||||
|   namespace: kube-system | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: Role | ||||
|   name: extension-apiserver-authentication-reader | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: metrics-server | ||||
|   namespace: kube-system | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: metrics-server:system:auth-delegator | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: system:auth-delegator | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: metrics-server | ||||
|   namespace: kube-system | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: system:metrics-server | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: system:metrics-server | ||||
| subjects: | ||||
| - kind: ServiceAccount | ||||
|   name: metrics-server | ||||
|   namespace: kube-system | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: Service | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: metrics-server | ||||
|   namespace: kube-system | ||||
| spec: | ||||
|   ports: | ||||
|   - name: https | ||||
|     port: 443 | ||||
|     protocol: TCP | ||||
|     targetPort: https | ||||
|   selector: | ||||
|     k8s-app: metrics-server | ||||
| --- | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: metrics-server | ||||
|   namespace: kube-system | ||||
| spec: | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       k8s-app: metrics-server | ||||
|   strategy: | ||||
|     rollingUpdate: | ||||
|       maxUnavailable: 0 | ||||
|   template: | ||||
|     metadata: | ||||
|       labels: | ||||
|         k8s-app: metrics-server | ||||
|     spec: | ||||
|       nodeSelector: | ||||
|         kubernetes.io/os: linux | ||||
|         node-role.kubernetes.io/control-plane: "" | ||||
|       tolerations: | ||||
|         - key: "node-role.kubernetes.io/control-plane" | ||||
|           effect: NoSchedule | ||||
|       containers: | ||||
|       - args: | ||||
|         - --cert-dir=/tmp | ||||
|         - --secure-port=6443 | ||||
|         - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname | ||||
|         - --kubelet-use-node-status-port | ||||
|         - --metric-resolution=15s | ||||
|         - --authorization-always-allow-paths=/metrics | ||||
|         image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0 | ||||
|         imagePullPolicy: IfNotPresent | ||||
|         livenessProbe: | ||||
|           failureThreshold: 3 | ||||
|           httpGet: | ||||
|             path: /livez | ||||
|             port: https | ||||
|             scheme: HTTPS | ||||
|           periodSeconds: 10 | ||||
|         name: metrics-server | ||||
|         ports: | ||||
|         - containerPort: 6443 | ||||
|           name: https | ||||
|           protocol: TCP | ||||
|         readinessProbe: | ||||
|           failureThreshold: 3 | ||||
|           httpGet: | ||||
|             path: /readyz | ||||
|             port: https | ||||
|             scheme: HTTPS | ||||
|           initialDelaySeconds: 20 | ||||
|           periodSeconds: 10 | ||||
|         resources: | ||||
|           requests: | ||||
|             cpu: 100m | ||||
|             memory: 200Mi | ||||
|         securityContext: | ||||
|           readOnlyRootFilesystem: true | ||||
|           runAsNonRoot: true | ||||
|           runAsUser: 1000 | ||||
|         volumeMounts: | ||||
|         - mountPath: /tmp | ||||
|           name: tmp-dir | ||||
|       priorityClassName: system-cluster-critical | ||||
|       serviceAccountName: metrics-server | ||||
|       volumes: | ||||
|       - emptyDir: {} | ||||
|         name: tmp-dir | ||||
| --- | ||||
| apiVersion: apiregistration.k8s.io/v1 | ||||
| kind: APIService | ||||
| metadata: | ||||
|   labels: | ||||
|     k8s-app: metrics-server | ||||
|   name: v1beta1.metrics.k8s.io | ||||
| spec: | ||||
|   group: metrics.k8s.io | ||||
|   groupPriorityMinimum: 100 | ||||
|   insecureSkipTLSVerify: true | ||||
|   service: | ||||
|     name: metrics-server | ||||
|     namespace: kube-system | ||||
|   version: v1beta1 | ||||
|   versionPriority: 100 | ||||
| @@ -14,10 +14,10 @@ regions         = ["uksouth", "ukwest", "westeurope"] | ||||
| ## Init and upload images | ||||
|  | ||||
| ```shell | ||||
| wget -q https://github.com/siderolabs/talos/releases/download/v1.3.4/azure-amd64.tar.gz | ||||
| wget -q https://github.com/siderolabs/talos/releases/download/v1.4.5/azure-amd64.tar.gz | ||||
| tar -xzf azure-amd64.tar.gz && mv disk.vhd disk-x64.vhd | ||||
|  | ||||
| wget -q https://github.com/siderolabs/talos/releases/download/v1.3.4/azure-arm64.tar.gz | ||||
| wget -q https://github.com/siderolabs/talos/releases/download/v1.4.5/azure-arm64.tar.gz | ||||
| tar -xzf azure-arm64.tar.gz && mv disk.vhd disk-arm64.vhd | ||||
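| # both archives extract to the same disk.vhd name, hence the per-arch renames above | ||||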
|  | ||||
| terraform init && terraform apply | ||||
|   | ||||
| @@ -24,7 +24,7 @@ variable "name" { | ||||
| variable "release" { | ||||
|   description = "The image name" | ||||
|   type        = string | ||||
|   default     = "1.3.4" | ||||
|   default     = "1.4.5" | ||||
| } | ||||
|  | ||||
| variable "arch" { | ||||
|   | ||||
| @@ -3,8 +3,8 @@ terraform { | ||||
|   required_providers { | ||||
|     azurerm = { | ||||
|       source  = "hashicorp/azurerm" | ||||
|       version = "~> 3.43.0" | ||||
|       version = "~> 3.61.0" | ||||
|     } | ||||
|   } | ||||
|   required_version = ">= 1.2" | ||||
|   required_version = ">= 1.5" | ||||
| } | ||||
|   | ||||
| @@ -11,59 +11,195 @@ resource "azurerm_availability_set" "controlplane" { | ||||
|   tags = merge(var.tags, { type = "infra" }) | ||||
| } | ||||
|  | ||||
| module "controlplane" { | ||||
|   source          = "./modules/controlplane" | ||||
|   for_each        = { for idx, name in local.regions : name => idx } | ||||
|   region          = each.key | ||||
|   subscription_id = local.subscription_id | ||||
| locals { | ||||
|   controlplane_labels = "kubernetes.azure.com/managed=false" | ||||
|  | ||||
|   instance_availability_set = azurerm_availability_set.controlplane[each.key].id | ||||
|   instance_count            = lookup(try(var.controlplane[each.key], {}), "count", 0) | ||||
|   instance_resource_group   = local.resource_group | ||||
|   instance_type             = lookup(try(var.controlplane[each.key], {}), "instance_type", "Standard_B2ms") | ||||
|   instance_image            = data.azurerm_shared_image_version.talos.id | ||||
|   instance_tags             = merge(var.tags, { type = "infra" }) | ||||
|   instance_secgroup         = local.network_secgroup[each.key].controlplane | ||||
|   instance_role_definition  = var.ccm_role_definition | ||||
|   instance_params = merge(var.kubernetes, { | ||||
|     lbv4   = local.network_controlplane[each.key].controlplane_lb[0] | ||||
|     lbv6   = try(local.network_controlplane[each.key].controlplane_lb[1], "") | ||||
|     region = each.key | ||||
|   controlplanes = { for k in flatten([ | ||||
|     for region in local.regions : [ | ||||
|       for inx in range(lookup(try(var.controlplane[region], {}), "count", 0)) : { | ||||
|         inx : inx | ||||
|         name : "controlplane-${region}-${1 + inx}" | ||||
|         region : region | ||||
|         availability_set : azurerm_availability_set.controlplane[region].id | ||||
|  | ||||
|     ccm = templatefile("${path.module}/deployments/azure.json.tpl", { | ||||
|       subscriptionId = local.subscription_id | ||||
|       tenantId       = data.azurerm_client_config.terraform.tenant_id | ||||
|       region         = each.key | ||||
|       resourceGroup  = local.resource_group | ||||
|       vnetName       = local.network[each.key].name | ||||
|       tags           = join(",", [for k, v in var.tags : "${k}=${v}"]) | ||||
|     }) | ||||
|   }) | ||||
|         image : data.azurerm_shared_image_version.talos[startswith(lookup(try(var.controlplane[region], {}), "type", ""), "Standard_D2p") ? "Arm64" : "x64"].id | ||||
|         type : lookup(try(var.controlplane[region], {}), "type", "Standard_B2ms") | ||||
|  | ||||
|   network_internal = local.network_controlplane[each.key] | ||||
|         ip : 11 + inx | ||||
|         secgroup : local.network_secgroup[region].controlplane | ||||
|         network : local.network_controlplane[region] | ||||
|         vnetName : local.network[region].name | ||||
|       } | ||||
|     ] | ||||
|   ]) : k.name => k } | ||||
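|   # keyed by instance name, so changing one region's count never re-indexes VMs in another region | ||||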
|  | ||||
|   # an address containing "." is IPv4, one containing ":" is IPv6 | ||||
|   lbv4s = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(".", ip)) > 1] | ||||
|   lbv6s = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(":", ip)) > 1] | ||||
| } | ||||
|  | ||||
| resource "local_file" "azure" { | ||||
|   content = templatefile("${path.module}/deployments/azure-as.json.tpl", { | ||||
|     subscriptionId = local.subscription_id | ||||
|     tenantId       = data.azurerm_client_config.terraform.tenant_id | ||||
|     region         = local.regions[0] | ||||
|     resourceGroup  = local.resource_group | ||||
|     tags           = join(",", [for k, v in var.tags : "${k}=${v}"]) | ||||
|   }) | ||||
|   filename        = "_cfgs/azure.json" | ||||
| resource "azurerm_public_ip" "controlplane_v4" { | ||||
|   for_each                = local.controlplanes | ||||
|   name                    = "${each.value.name}-v4" | ||||
|   resource_group_name     = local.resource_group | ||||
|   location                = each.value.region | ||||
|   ip_version              = "IPv4" | ||||
|   sku                     = each.value.network.sku | ||||
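|   # Standard-SKU public IPs must use static allocation; Basic may stay dynamic | ||||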
|   allocation_method       = each.value.network.sku == "Standard" ? "Static" : "Dynamic" | ||||
|   idle_timeout_in_minutes = 15 | ||||
|  | ||||
|   tags = merge(var.tags, { type = "infra" }) | ||||
| } | ||||
|  | ||||
| resource "azurerm_public_ip" "controlplane_v6" { | ||||
|   for_each                = { for k, v in local.controlplanes : k => v if v.network.sku == "Standard" } | ||||
|   name                    = "${each.value.name}-v6" | ||||
|   resource_group_name     = local.resource_group | ||||
|   location                = each.value.region | ||||
|   ip_version              = "IPv6" | ||||
|   sku                     = each.value.network.sku | ||||
|   allocation_method       = "Static" | ||||
|   idle_timeout_in_minutes = 15 | ||||
|  | ||||
|   tags = merge(var.tags, { type = "infra" }) | ||||
| } | ||||
|  | ||||
| resource "azurerm_network_interface" "controlplane" { | ||||
|   for_each            = local.controlplanes | ||||
|   name                = each.value.name | ||||
|   resource_group_name = local.resource_group | ||||
|   location            = each.value.region | ||||
|  | ||||
|   dynamic "ip_configuration" { | ||||
|     for_each = each.value.network.cidr | ||||
|  | ||||
|     content { | ||||
|       name                          = "${each.value.name}-v${length(split(".", ip_configuration.value)) > 1 ? "4" : "6"}" | ||||
|       primary                       = length(split(".", ip_configuration.value)) > 1 | ||||
|       subnet_id                     = each.value.network.network_id | ||||
|       private_ip_address            = cidrhost(ip_configuration.value, each.value.ip) | ||||
|       private_ip_address_version    = length(split(".", ip_configuration.value)) > 1 ? "IPv4" : "IPv6" | ||||
|       private_ip_address_allocation = "Static" | ||||
|       public_ip_address_id          = length(split(".", ip_configuration.value)) > 1 ? azurerm_public_ip.controlplane_v4[each.key].id : try(azurerm_public_ip.controlplane_v6[each.key].id, null) | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   tags = merge(var.tags, { type = "infra" }) | ||||
| } | ||||
|  | ||||
| resource "azurerm_network_interface_security_group_association" "controlplane" { | ||||
|   for_each                  = { for k, v in local.controlplanes : k => v if length(v.secgroup) > 0 } | ||||
|   network_interface_id      = azurerm_network_interface.controlplane[each.key].id | ||||
|   network_security_group_id = each.value.secgroup | ||||
| } | ||||
|  | ||||
| # Azure does not allow mixing Basic-SKU and Standard-SKU load balancer or public IP resources in one availability set | ||||
| resource "azurerm_network_interface_backend_address_pool_association" "controlplane_v4" { | ||||
|   for_each                = { for k, v in local.controlplanes : k => v if length(v.network.controlplane_pool_v4) > 0 } | ||||
|   network_interface_id    = azurerm_network_interface.controlplane[each.key].id | ||||
|   ip_configuration_name   = "${each.value.name}-v4" | ||||
|   backend_address_pool_id = local.network_controlplane[each.value.region].controlplane_pool_v4 | ||||
| } | ||||
|  | ||||
| resource "azurerm_network_interface_backend_address_pool_association" "controlplane_v6" { | ||||
|   for_each                = { for k, v in local.controlplanes : k => v if length(v.network.controlplane_pool_v6) > 0 } | ||||
|   network_interface_id    = azurerm_network_interface.controlplane[each.key].id | ||||
|   ip_configuration_name   = "${each.value.name}-v6" | ||||
|   backend_address_pool_id = local.network_controlplane[each.value.region].controlplane_pool_v6 | ||||
| } | ||||
|  | ||||
| resource "local_file" "controlplane" { | ||||
|   for_each = local.controlplanes | ||||
|  | ||||
|   content = templatefile("${path.module}/templates/controlplane.yaml.tpl", | ||||
|     merge(var.kubernetes, { | ||||
|       name   = each.value.name | ||||
|       labels = local.controlplane_labels | ||||
|       certSANs = flatten([ | ||||
|         var.kubernetes["apiDomain"], | ||||
|         each.value.network.controlplane_lb, | ||||
|         azurerm_public_ip.controlplane_v4[each.key].ip_address, | ||||
|       ]) | ||||
|       ipAliases   = compact(each.value.network.controlplane_lb) | ||||
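|       # first half of the controlplane CIDR, excluding the LB VIP ("!" excludes a match in Talos subnet lists) | ||||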
|       nodeSubnets = [cidrsubnet(each.value.network.cidr[0], 1, 0), "!${each.value.network.controlplane_lb[0]}"] | ||||
|  | ||||
|       ccm = templatefile("${path.module}/deployments/azure.json.tpl", { | ||||
|         subscriptionId = local.subscription_id | ||||
|         tenantId       = data.azurerm_client_config.terraform.tenant_id | ||||
|         region         = local.regions[0] # each.value.region | ||||
|         resourceGroup  = local.resource_group | ||||
|         vnetName       = local.network[each.value.region].name | ||||
|         tags           = join(",", [for k, v in var.tags : "${k}=${v}"]) | ||||
|       }) | ||||
|     }) | ||||
|   ) | ||||
|   filename        = "_cfgs/${each.value.name}.yaml" | ||||
|   file_permission = "0600" | ||||
| } | ||||
|  | ||||
| resource "azurerm_linux_virtual_machine" "controlplane" { | ||||
|   for_each                   = local.controlplanes | ||||
|   name                       = each.value.name | ||||
|   computer_name              = each.value.name | ||||
|   resource_group_name        = local.resource_group | ||||
|   location                   = each.value.region | ||||
|   size                       = each.value.type | ||||
|   allow_extension_operations = false | ||||
|   provision_vm_agent         = false | ||||
|   availability_set_id        = each.value.availability_set | ||||
|   network_interface_ids      = [azurerm_network_interface.controlplane[each.key].id] | ||||
|  | ||||
|   identity { | ||||
|     type = "SystemAssigned" | ||||
|   } | ||||
|  | ||||
|   # vtpm_enabled               = false | ||||
|   # encryption_at_host_enabled = true | ||||
|   os_disk { | ||||
|     name                 = each.value.name | ||||
|     caching              = "ReadOnly" | ||||
|     storage_account_type = "Premium_LRS" | ||||
|     disk_size_gb         = 48 | ||||
|   } | ||||
|  | ||||
|   admin_username = "talos" | ||||
|   admin_ssh_key { | ||||
|     username   = "talos" | ||||
|     public_key = file("~/.ssh/terraform.pub") | ||||
|   } | ||||
|  | ||||
|   source_image_id = length(each.value.image) > 0 ? each.value.image : null | ||||
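|   # fall back to the Talos marketplace image reference when no shared-gallery image id is supplied | ||||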
|   dynamic "source_image_reference" { | ||||
|     for_each = length(each.value.image) == 0 ? ["gallery"] : [] | ||||
|     content { | ||||
|       publisher = "talos" | ||||
|       offer     = "Talos" | ||||
|       sku       = "MPL-2.0" | ||||
|       version   = "latest" | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   tags = merge(var.tags, { type = "infra" }) | ||||
|  | ||||
|   boot_diagnostics {} | ||||
|   lifecycle { | ||||
|     ignore_changes = [admin_username, admin_ssh_key, os_disk, custom_data, source_image_id, tags] | ||||
|   } | ||||
| } | ||||
|  | ||||
| resource "azurerm_role_assignment" "controlplane" { | ||||
|   for_each             = local.controlplanes | ||||
|   scope                = "/subscriptions/${local.subscription_id}" | ||||
|   role_definition_name = var.controlplane_role_definition | ||||
|   principal_id         = azurerm_linux_virtual_machine.controlplane[each.key].identity[0].principal_id | ||||
| } | ||||
|  | ||||
| locals { | ||||
|   lbv4s    = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(".", ip)) > 1] | ||||
|   lbv6s    = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(":", ip)) > 1] | ||||
|   endpoint = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "") | ||||
|   controlplane_endpoints = try([for ip in azurerm_public_ip.controlplane_v4 : ip.ip_address if ip.ip_address != ""], []) | ||||
| } | ||||
|  | ||||
| resource "azurerm_private_dns_a_record" "controlplane" { | ||||
|   for_each            = toset(values({ for zone, name in local.network : zone => name.dns if name.dns != "" })) | ||||
|   name                = "controlplane" | ||||
|   name                = split(".", var.kubernetes["apiDomain"])[0] | ||||
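|   # first label of the API domain (e.g. "api" from a domain like "api.example.com") | ||||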
|   resource_group_name = local.resource_group | ||||
|   zone_name           = each.key | ||||
|   ttl                 = 300 | ||||
| @@ -74,7 +210,7 @@ resource "azurerm_private_dns_a_record" "controlplane" { | ||||
|  | ||||
| resource "azurerm_private_dns_aaaa_record" "controlplane" { | ||||
|   for_each            = toset(values({ for zone, name in local.network : zone => name.dns if name.dns != "" && length(local.lbv6s) > 0 })) | ||||
|   name                = "controlplane" | ||||
|   name                = split(".", var.kubernetes["apiDomain"])[0] | ||||
|   resource_group_name = local.resource_group | ||||
|   zone_name           = each.key | ||||
|   ttl                 = 300 | ||||
| @@ -84,12 +220,14 @@ resource "azurerm_private_dns_aaaa_record" "controlplane" { | ||||
| } | ||||
|  | ||||
| resource "azurerm_private_dns_a_record" "controlplane_zonal" { | ||||
|   for_each            = { for idx, name in local.regions : name => idx if lookup(try(var.controlplane[name], {}), "count", 0) > 1 && local.network[name].dns != "" } | ||||
|   name                = "controlplane-${each.key}" | ||||
|   for_each            = { for idx, region in local.regions : region => idx if local.network[region].dns != "" } | ||||
|   name                = "${split(".", var.kubernetes["apiDomain"])[0]}-${each.key}" | ||||
|   resource_group_name = local.resource_group | ||||
|   zone_name           = local.network[each.key].dns | ||||
|   ttl                 = 300 | ||||
|   records             = flatten(module.controlplane[each.key].controlplane_endpoints) | ||||
|   records = flatten([for cp in azurerm_network_interface.controlplane : | ||||
|     [for ip in cp.ip_configuration : ip.private_ip_address if ip.private_ip_address_version == "IPv4"] if cp.location == each.key | ||||
|   ]) | ||||
|  | ||||
|   tags = merge(var.tags, { type = "infra" }) | ||||
| } | ||||
|   | ||||
| @@ -20,25 +20,22 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" { | ||||
|     name                      = "db-${lower(each.key)}" | ||||
|     primary                   = true | ||||
|     network_security_group_id = local.network_secgroup[each.key].common | ||||
|  | ||||
|     ip_configuration { | ||||
|       name      = "db-${lower(each.key)}-v4" | ||||
|       primary   = true | ||||
|       version   = "IPv4" | ||||
|       subnet_id = local.network_public[each.key].network_id | ||||
|       public_ip_address { | ||||
|         name    = "db-${lower(each.key)}-v4" | ||||
|         version = "IPv4" | ||||
|       } | ||||
|       subnet_id = local.network_private[each.key].network_id | ||||
|     } | ||||
|     ip_configuration { | ||||
|       name      = "db-${lower(each.key)}-v6" | ||||
|       version   = "IPv6" | ||||
|       subnet_id = local.network_public[each.key].network_id | ||||
|       subnet_id = local.network_private[each.key].network_id | ||||
|  | ||||
|       dynamic "public_ip_address" { | ||||
|         for_each = local.network_public[each.key].sku == "Standard" ? ["IPv6"] : [] | ||||
|         content { | ||||
|           name    = "worker-${lower(each.key)}-v6" | ||||
|           name    = "db-${lower(each.key)}-v6" | ||||
|           version = public_ip_address.value | ||||
|         } | ||||
|       } | ||||
| @@ -65,7 +62,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" { | ||||
|     disk_size_gb         = 50 | ||||
|   } | ||||
|  | ||||
|   source_image_id = data.azurerm_shared_image_version.talos.id | ||||
|   source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "db_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id | ||||
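|   # Standard_D2p* sizes are Ampere-based (arm64) VMs; all other sizes boot the x64 image | ||||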
|   #   source_image_reference { | ||||
|   #     publisher = "talos" | ||||
|   #     offer     = "Talos" | ||||
|   | ||||
| @@ -17,11 +17,11 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" { | ||||
|   platform_fault_domain_count  = 2 | ||||
|   proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id | ||||
|  | ||||
|   # health_probe_id = local.network_public[each.key].sku != "Basic" ? azurerm_lb_probe.web[each.key].id : null | ||||
|   # automatic_instance_repair { | ||||
|   #   enabled      = local.network_public[each.key].sku != "Basic" | ||||
|   #   grace_period = "PT60M" | ||||
|   # } | ||||
|   #   health_probe_id = local.network_public[each.key].sku != "Basic" ? azurerm_lb_probe.web[each.key].id : null | ||||
|   #   automatic_instance_repair { | ||||
|   #     enabled      = local.network_public[each.key].sku != "Basic" | ||||
|   #     grace_period = "PT60M" | ||||
|   #   } | ||||
|  | ||||
|   network_interface { | ||||
|     name                      = "web-${lower(each.key)}" | ||||
| @@ -32,7 +32,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" { | ||||
|       primary                                = true | ||||
|       version                                = "IPv4" | ||||
|       subnet_id                              = local.network_public[each.key].network_id | ||||
|       load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.web_v4[each.key].id] | ||||
|       load_balancer_backend_address_pool_ids = lookup(try(var.instances[each.key], {}), "web_count", 0) > 0 ? [azurerm_lb_backend_address_pool.web_v4[each.key].id] : [] | ||||
|     } | ||||
|     ip_configuration { | ||||
|       name      = "web-${lower(each.key)}-v6" | ||||
| @@ -42,7 +42,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" { | ||||
|       dynamic "public_ip_address" { | ||||
|         for_each = local.network_public[each.key].sku == "Standard" ? ["IPv6"] : [] | ||||
|         content { | ||||
|           name    = "worker-${lower(each.key)}-v6" | ||||
|           name    = "web-${lower(each.key)}-v6" | ||||
|           version = public_ip_address.value | ||||
|         } | ||||
|       } | ||||
| @@ -69,7 +69,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" { | ||||
|     disk_size_gb         = 50 | ||||
|   } | ||||
|  | ||||
|   source_image_id = data.azurerm_shared_image_version.talos.id | ||||
|   source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "worker_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id | ||||
|   #   source_image_reference { | ||||
|   #     publisher = "talos" | ||||
|   #     offer     = "Talos" | ||||
|   | ||||
| @@ -18,8 +18,9 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" { | ||||
|   proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id | ||||
|  | ||||
|   network_interface { | ||||
|     name    = "worker-${lower(each.key)}" | ||||
|     primary = true | ||||
|     name                      = "worker-${lower(each.key)}" | ||||
|     primary                   = true | ||||
|     network_security_group_id = local.network_secgroup[each.key].common | ||||
|  | ||||
|     enable_accelerated_networking = lookup(try(var.instances[each.key], {}), "worker_os_ephemeral", false) | ||||
|     ip_configuration { | ||||
| @@ -71,7 +72,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" { | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   source_image_id = data.azurerm_shared_image_version.talos.id | ||||
|   source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "worker_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id | ||||
|   #   source_image_reference { | ||||
|   #     publisher = "talos" | ||||
|   #     offer     = "Talos" | ||||
|   | ||||
| @@ -3,8 +3,8 @@ terraform { | ||||
|   required_providers { | ||||
|     azurerm = { | ||||
|       source  = "hashicorp/azurerm" | ||||
|       version = "~> 3.43.0" | ||||
|       version = "~> 3.62.1" | ||||
|     } | ||||
|   } | ||||
|   required_version = ">= 1.2" | ||||
|   required_version = ">= 1.5" | ||||
| } | ||||
|   | ||||
| @@ -11,7 +11,7 @@ resource "azurerm_public_ip" "web_v4" { | ||||
| } | ||||
|  | ||||
| resource "azurerm_lb" "web" { | ||||
|   for_each            = { for idx, name in local.regions : name => idx } | ||||
|   for_each            = { for idx, name in local.regions : name => idx if lookup(try(var.instances[name], {}), "web_count", 0) > 0 } | ||||
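|   # created only in regions with a nonzero web_count | ||||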
|   location            = each.key | ||||
|   name                = "web-${lower(each.key)}" | ||||
|   resource_group_name = local.resource_group | ||||
| @@ -26,13 +26,13 @@ resource "azurerm_lb" "web" { | ||||
| } | ||||
|  | ||||
| resource "azurerm_lb_backend_address_pool" "web_v4" { | ||||
|   for_each        = { for idx, name in local.regions : name => idx } | ||||
|   for_each        = { for idx, name in local.regions : name => idx if lookup(try(var.instances[name], {}), "web_count", 0) > 0 } | ||||
|   loadbalancer_id = azurerm_lb.web[each.key].id | ||||
|   name            = "web-pool-v4" | ||||
| } | ||||
|  | ||||
| resource "azurerm_lb_probe" "web" { | ||||
|   for_each            = { for idx, name in local.regions : name => idx } | ||||
|   for_each            = { for idx, name in local.regions : name => idx if lookup(try(var.instances[name], {}), "web_count", 0) > 0 } | ||||
|   name                = "web-http-probe" | ||||
|   loadbalancer_id     = azurerm_lb.web[each.key].id | ||||
|   interval_in_seconds = 30 | ||||
| @@ -42,7 +42,7 @@ resource "azurerm_lb_probe" "web" { | ||||
| } | ||||
|  | ||||
| resource "azurerm_lb_rule" "web_http_v4" { | ||||
|   for_each                       = { for idx, name in local.regions : name => idx } | ||||
|   for_each                       = { for idx, name in local.regions : name => idx if lookup(try(var.instances[name], {}), "web_count", 0) > 0 } | ||||
|   name                           = "web_http-v4" | ||||
|   loadbalancer_id                = azurerm_lb.web[each.key].id | ||||
|   frontend_ip_configuration_name = "web-lb-v4" | ||||
| @@ -58,7 +58,7 @@ resource "azurerm_lb_rule" "web_http_v4" { | ||||
| } | ||||
|  | ||||
| resource "azurerm_lb_rule" "web_https_v4" { | ||||
|   for_each                       = { for idx, name in local.regions : name => idx } | ||||
|   for_each                       = { for idx, name in local.regions : name => idx if lookup(try(var.instances[name], {}), "web_count", 0) > 0 } | ||||
|   name                           = "web-https-v4" | ||||
|   loadbalancer_id                = azurerm_lb.web[each.key].id | ||||
|   frontend_ip_configuration_name = "web-lb-v4" | ||||
| @@ -74,7 +74,7 @@ resource "azurerm_lb_rule" "web_https_v4" { | ||||
| } | ||||
|  | ||||
| resource "azurerm_lb_outbound_rule" "web" { | ||||
|   for_each                 = { for idx, name in local.regions : name => idx if local.network_public[name].sku != "Basic" } | ||||
|   for_each                 = { for idx, name in local.regions : name => idx if lookup(try(var.instances[name], {}), "web_count", 0) > 0 && local.network_public[name].sku != "Basic" } | ||||
|   name                     = "snat" | ||||
|   loadbalancer_id          = azurerm_lb.web[each.key].id | ||||
|   backend_address_pool_id  = azurerm_lb_backend_address_pool.web_v4[each.key].id | ||||
|   | ||||
| @@ -1,15 +1,31 @@ | ||||
|  | ||||
| output "controlplane_endpoints" { | ||||
|   description = "Kubernetes controlplane endpoints" | ||||
|   value       = try([for ip in azurerm_public_ip.controlplane_v4 : ip.ip_address if ip.ip_address != ""], []) | ||||
| } | ||||
|  | ||||
| output "controlplane_endpoint" { | ||||
|   description = "Kubernetes controlplane endpoint" | ||||
|   value       = module.controlplane | ||||
|   description = "Kubernetes controlplane endpoints" | ||||
|   value = one(flatten([for cp in azurerm_network_interface.controlplane : | ||||
|     [for ip in cp.ip_configuration : ip.private_ip_address if ip.private_ip_address_version == "IPv4"] | ||||
|   ])) | ||||
| } | ||||
|  | ||||
| output "controlplane_bootstrap" { | ||||
|   description = "Kubernetes controlplane bootstrap command" | ||||
|   value = try([ | ||||
|     for cp in azurerm_linux_virtual_machine.controlplane : "talosctl apply-config --insecure --nodes ${cp.public_ip_addresses[0]} --timeout 5m0s --config-patch @_cfgs/${cp.name}.yaml --file _cfgs/controlplane.yaml" | ||||
|   ]) | ||||
|  | ||||
|   depends_on = [azurerm_linux_virtual_machine.controlplane] | ||||
| } | ||||
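| # to execute the emitted commands, one option (a sketch; assumes jq is available): | ||||
| #   terraform output -json controlplane_bootstrap | jq -r '.[]' | sh | ||||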
|  | ||||
| output "controlplane_endpoint_public" { | ||||
|   description = "Kubernetes controlplane endpoint public" | ||||
|   value       = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "127.0.0.1") | ||||
|   value       = try(one([for ip in azurerm_public_ip.controlplane_v4 : ip.ip_address if ip.ip_address != ""]), "127.0.0.1") | ||||
| } | ||||
|  | ||||
| output "web_endpoint" { | ||||
|   description = "Kubernetes controlplane endpoint" | ||||
|   value       = compact([for lb in azurerm_public_ip.web_v4 : lb.ip_address]) | ||||
| } | ||||
| # output "web_endpoint" { | ||||
| #   description = "Kubernetes controlplane endpoint" | ||||
| #   value       = compact([for lb in azurerm_public_ip.web_v4 : lb.ip_address]) | ||||
| # } | ||||
|   | ||||
| @@ -17,7 +17,6 @@ resource "azurerm_subnet" "controlplane" { | ||||
|   address_prefixes = [ | ||||
|     for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, length(split(".", cidr)) > 1 ? 4 : 2, 0) | ||||
|   ] | ||||
|   service_endpoints = ["Microsoft.ContainerRegistry", "Microsoft.Storage"] | ||||
| } | ||||
|  | ||||
| resource "azurerm_subnet" "shared" { | ||||
| @@ -36,7 +35,18 @@ resource "azurerm_subnet" "services" { | ||||
|   resource_group_name  = var.resource_group | ||||
|   virtual_network_name = azurerm_virtual_network.main[each.key].name | ||||
|   address_prefixes = [ | ||||
|     for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 3, 1) if length(split(".", cidr)) > 1 | ||||
|     for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 4, 2) if length(split(".", cidr)) > 1 | ||||
|   ] | ||||
|   service_endpoints = ["Microsoft.ContainerRegistry", "Microsoft.Storage", "Microsoft.KeyVault"] | ||||
| } | ||||
|  | ||||
| resource "azurerm_subnet" "databases" { | ||||
|   for_each             = { for idx, name in var.regions : name => idx } | ||||
|   name                 = "databases" | ||||
|   resource_group_name  = var.resource_group | ||||
|   virtual_network_name = azurerm_virtual_network.main[each.key].name | ||||
|   address_prefixes = [ | ||||
|     for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 4, 3) if length(split(".", cidr)) > 1 | ||||
|   ] | ||||
| } | ||||
|  | ||||
| @@ -48,7 +58,6 @@ resource "azurerm_subnet" "public" { | ||||
|   address_prefixes = [ | ||||
|     for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 2, 2) | ||||
|   ] | ||||
|   service_endpoints = ["Microsoft.ContainerRegistry", "Microsoft.Storage"] | ||||
| } | ||||
|  | ||||
| resource "azurerm_subnet" "private" { | ||||
| @@ -59,7 +68,6 @@ resource "azurerm_subnet" "private" { | ||||
|   address_prefixes = [ | ||||
|     for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 2, 3) | ||||
|   ] | ||||
|   service_endpoints = ["Microsoft.ContainerRegistry", "Microsoft.Storage"] | ||||
| } | ||||
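
The subnet carving above is all cidrsubnet(prefix, newbits, netnum), and the length(split(".", cidr)) > 1 test simply distinguishes IPv4 prefixes (which contain dots) from IPv6 ones. A worked sketch under an assumed address space of ["172.16.0.0/16", "fd00:172:16::/56"]; note that moving services from cidrsubnet(cidr, 3, 1) to cidrsubnet(cidr, 4, 2) keeps the same base address while halving the range, freeing slot 3 for the new databases subnet:

locals {
  address_space = ["172.16.0.0/16", "fd00:172:16::/56"]

  # IPv4 gets newbits = 4 (/16 -> /20); IPv6 gets newbits = 2 (/56 -> /58).
  controlplane = [for cidr in local.address_space :
    cidrsubnet(cidr, length(split(".", cidr)) > 1 ? 4 : 2, 0)
  ]
  # => ["172.16.0.0/20", "fd00:172:16::/58"]

  services  = cidrsubnet("172.16.0.0/16", 4, 2) # "172.16.32.0/20"
  databases = cidrsubnet("172.16.0.0/16", 4, 3) # "172.16.48.0/20"

  # public/private sit on a coarser /18 grid, clear of the /20 slots above:
  public  = cidrsubnet("172.16.0.0/16", 2, 2)   # "172.16.128.0/18"
  private = cidrsubnet("172.16.0.0/16", 2, 3)   # "172.16.192.0/18"
}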
|  | ||||
| resource "azurerm_virtual_network_peering" "peering" { | ||||
|   | ||||
| @@ -3,8 +3,8 @@ terraform { | ||||
|   required_providers { | ||||
|     azurerm = { | ||||
|       source  = "hashicorp/azurerm" | ||||
|       version = "~> 3.39.1" | ||||
|       version = "~> 3.62.1" | ||||
|     } | ||||
|   } | ||||
|   required_version = ">= 1.2" | ||||
|   required_version = ">= 1.5" | ||||
| } | ||||
|   | ||||
| @@ -1,16 +1,5 @@ | ||||
| version: v1alpha1 | ||||
| debug: false | ||||
| persist: true | ||||
| machine: | ||||
|   type: controlplane | ||||
|   certSANs: ${format("%#v",certSANs)} | ||||
|   features: | ||||
|     kubernetesTalosAPIAccess: | ||||
|       enabled: true | ||||
|       allowedRoles: | ||||
|         - os:reader | ||||
|       allowedKubernetesNamespaces: | ||||
|         - kube-system | ||||
|   kubelet: | ||||
|     extraArgs: | ||||
|       node-labels: "${labels}" | ||||
| @@ -36,36 +25,41 @@ machine: | ||||
|         addresses: | ||||
|           - 169.254.2.53/32 | ||||
|     extraHostEntries: | ||||
|       - ip: ${lbv4} | ||||
|       - ip: 127.0.0.1 | ||||
|         aliases: | ||||
|           - ${apiDomain} | ||||
|   install: | ||||
|     wipe: false | ||||
|   sysctls: | ||||
|     net.core.somaxconn: 65535 | ||||
|     net.core.netdev_max_backlog: 4096 | ||||
|   systemDiskEncryption: | ||||
|     state: | ||||
|       provider: luks2 | ||||
|       options: | ||||
|         - no_read_workqueue | ||||
|         - no_write_workqueue | ||||
|       keys: | ||||
|         - nodeID: {} | ||||
|           slot: 0 | ||||
|     ephemeral: | ||||
|       provider: luks2 | ||||
|       keys: | ||||
|         - nodeID: {} | ||||
|           slot: 0 | ||||
|       options: | ||||
|         - no_read_workqueue | ||||
|         - no_write_workqueue | ||||
|       keys: | ||||
|         - nodeID: {} | ||||
|           slot: 0 | ||||
|   features: | ||||
|     kubernetesTalosAPIAccess: | ||||
|       enabled: true | ||||
|       allowedRoles: | ||||
|         - os:reader | ||||
|       allowedKubernetesNamespaces: | ||||
|         - kube-system | ||||
| cluster: | ||||
|   id: ${clusterID} | ||||
|   secret: ${clusterSecret} | ||||
|   adminKubeconfig: | ||||
|     certLifetime: 8h0m0s | ||||
|   controlPlane: | ||||
|     endpoint: https://${apiDomain}:6443 | ||||
|   clusterName: ${clusterName} | ||||
|   discovery: | ||||
|     enabled: true | ||||
|   network: | ||||
|     dnsDomain: ${domain} | ||||
|     podSubnets: ${format("%#v",split(",",podSubnets))} | ||||
| @@ -73,37 +67,15 @@ cluster: | ||||
|     cni: | ||||
|       name: custom | ||||
|       urls: | ||||
|         - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/cilium-result.yaml | ||||
|         - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/cilium-result.yaml | ||||
|   proxy: | ||||
|     disabled: true | ||||
|   apiServer: | ||||
|     certSANs: ${format("%#v",certSANs)} | ||||
|     admissionControl: | ||||
|       - name: PodSecurity | ||||
|         configuration: | ||||
|           apiVersion: pod-security.admission.config.k8s.io/v1alpha1 | ||||
|           defaults: | ||||
|             audit: restricted | ||||
|             audit-version: latest | ||||
|             enforce: baseline | ||||
|             enforce-version: latest | ||||
|             warn: restricted | ||||
|             warn-version: latest | ||||
|           exemptions: | ||||
|             namespaces: | ||||
|               - kube-system | ||||
|               - ingress-nginx | ||||
|               - monitoring | ||||
|               - local-path-storage | ||||
|               - local-lvm | ||||
|             runtimeClasses: [] | ||||
|             usernames: [] | ||||
|           kind: PodSecurityConfiguration | ||||
|   controllerManager: | ||||
|     extraArgs: | ||||
|         node-cidr-mask-size-ipv4: 24 | ||||
|         node-cidr-mask-size-ipv6: 112 | ||||
|   scheduler: {} | ||||
|   etcd: | ||||
|     advertisedSubnets: | ||||
|       - ${nodeSubnets[0]} | ||||
| @@ -126,14 +98,14 @@ cluster: | ||||
|   externalCloudProvider: | ||||
|     enabled: true | ||||
|     manifests: | ||||
|       - https://raw.githubusercontent.com/siderolabs/talos-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/talos-cloud-controller-manager-result.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-cloud-controller-manager.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-csi-node.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-csi.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-storage.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/kubelet-serving-cert-approver.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/metrics-server.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/local-path-storage.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/coredns-local.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/ingress-ns.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/ingress-result.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/metrics-server-result.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-ns.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-result.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/coredns-local.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-ns.yaml | ||||
|       - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-result.yaml | ||||
|   | ||||
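Template placeholders such as ${format("%#v", split(",", podSubnets))} rely on %#v serializing the Terraform value as JSON; since JSON is valid YAML flow syntax, the result drops straight into the rendered machine config. A standalone sketch with an illustrative dual-stack value (not the project's real subnets):

locals {
  podSubnets = "10.0.0.0/16,fd00:10::/64" # hypothetical comma-separated input

  # format("%#v", ...) JSON-encodes the list:
  # => ["10.0.0.0/16","fd00:10::/64"]
  rendered = format("%#v", split(",", local.podSubnets))
}
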
| @@ -11,11 +11,11 @@ machine: | ||||
|       cloud-provider: external | ||||
|       rotate-server-certificates: true | ||||
|       node-labels: "${labels}" | ||||
|     nodeIP: | ||||
|       validSubnets: ${format("%#v",nodeSubnets)} | ||||
|     clusterDNS: | ||||
|       - 169.254.2.53 | ||||
|       - ${cidrhost(split(",",serviceSubnets)[0], 10)} | ||||
|     nodeIP: | ||||
|       validSubnets: ${format("%#v",nodeSubnets)} | ||||
|   network: | ||||
|     interfaces: | ||||
|       - interface: eth0 | ||||
| @@ -32,11 +32,28 @@ machine: | ||||
|       - ip: ${lbv4} | ||||
|         aliases: | ||||
|           - ${apiDomain} | ||||
|   install: | ||||
|     wipe: false | ||||
|   sysctls: | ||||
|     net.core.somaxconn: 65535 | ||||
|     net.core.netdev_max_backlog: 4096 | ||||
|   install: | ||||
|     wipe: false | ||||
|   systemDiskEncryption: | ||||
|     state: | ||||
|       provider: luks2 | ||||
|       options: | ||||
|         - no_read_workqueue | ||||
|         - no_write_workqueue | ||||
|       keys: | ||||
|         - nodeID: {} | ||||
|           slot: 0 | ||||
|     ephemeral: | ||||
|       provider: luks2 | ||||
|       options: | ||||
|         - no_read_workqueue | ||||
|         - no_write_workqueue | ||||
|       keys: | ||||
|         - nodeID: {} | ||||
|           slot: 0 | ||||
| cluster: | ||||
|   id: ${clusterID} | ||||
|   secret: ${clusterSecret} | ||||
|   | ||||
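The clusterDNS entry above derives the in-cluster DNS address from the first (IPv4) service subnet with cidrhost(). A worked sketch, assuming a hypothetical serviceSubnets value:

locals {
  serviceSubnets = "10.96.0.0/12,fd00:10:96::/112"

  # Host #10 of the first subnet -- the address conventionally
  # reserved for the cluster DNS service:
  cluster_dns = cidrhost(split(",", local.serviceSubnets)[0], 10) # "10.96.0.10"
}
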
| @@ -1,5 +1,5 @@ | ||||
|  | ||||
| variable "ccm_role_definition" { | ||||
| variable "controlplane_role_definition" { | ||||
|   default = "kubernetes-ccm" | ||||
| } | ||||
|  | ||||
| @@ -26,6 +26,12 @@ locals { | ||||
|   network_secgroup     = data.terraform_remote_state.prepare.outputs.secgroups | ||||
| } | ||||
|  | ||||
| variable "arch" { | ||||
|   description = "The Talos architecture list" | ||||
|   type        = list(string) | ||||
|   default     = ["x64", "Arm64"] | ||||
| } | ||||
|  | ||||
| variable "tags" { | ||||
|   description = "Tags of resources" | ||||
|   type        = map(string) | ||||
| @@ -39,12 +45,12 @@ variable "controlplane" { | ||||
|   type        = map(any) | ||||
|   default = { | ||||
|     "uksouth" = { | ||||
|       count         = 0, | ||||
|       instance_type = "Standard_B2s", | ||||
|       count = 0, | ||||
|       type  = "Standard_B2ms", | ||||
|     }, | ||||
|     "ukwest" = { | ||||
|       count         = 0, | ||||
|       instance_type = "Standard_B2s", | ||||
|       count = 0, | ||||
|       type  = "Standard_B2ms", | ||||
|     }, | ||||
|   } | ||||
| } | ||||
|   | ||||
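The per-region controlplane map keeps a node count and machine type per Azure region. A sketch of the consumption pattern such a map typically feeds (an assumption -- the resources that read it are not part of this hunk):

locals {
  # One entry per node, keyed "region-index"; count = 0 regions expand to nothing.
  controlplane_nodes = merge([
    for region, cfg in var.controlplane : {
      for i in range(cfg.count) :
      "${region}-${i}" => { region = region, type = cfg.type }
    }
  ]...)
}
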
| @@ -3,8 +3,8 @@ terraform { | ||||
|   required_providers { | ||||
|     azurerm = { | ||||
|       source  = "hashicorp/azurerm" | ||||
|       version = "~> 3.43.0" | ||||
|       version = "~> 3.62.1" | ||||
|     } | ||||
|   } | ||||
|   required_version = ">= 1.2" | ||||
|   required_version = ">= 1.5" | ||||
| } | ||||
|   | ||||
| @@ -6,7 +6,6 @@ machine: | ||||
|   token: ${tokenMachine} | ||||
|   ca: | ||||
|     crt: ${caMachine} | ||||
|   certSANs: [] | ||||
|   nodeLabels: | ||||
|     node.kubernetes.io/disktype: ssd | ||||
|   kubelet: | ||||
| @@ -59,6 +58,8 @@ cluster: | ||||
|   network: | ||||
|     dnsDomain: ${domain} | ||||
|     serviceSubnets: ${format("%#v",split(",",serviceSubnets))} | ||||
|   proxy: | ||||
|     disabled: true | ||||
|   token: ${token} | ||||
|   ca: | ||||
|     crt: ${ca} | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
| packer { | ||||
|   required_plugins { | ||||
|     proxmox = { | ||||
|       version = ">= 1.1.2" | ||||
|       version = ">= 1.1.3" | ||||
|       source  = "github.com/hashicorp/proxmox" | ||||
|     } | ||||
|   } | ||||
|   | ||||