mirror of https://github.com/optim-enterprises-bv/terraform-talos.git
synced 2025-11-03 19:57:46 +00:00
	fix load-balancer
@@ -9,7 +9,7 @@ helm-repos: ## add helm repos
 	helm repo update
 
 create-deployments: ## create templates
-	helm template --namespace=kube-system  --version=1.12.7 -f vars/cilium.yaml cilium \
+	helm template --namespace=kube-system  --version=1.15.7 -f vars/cilium.yaml cilium \
 		cilium/cilium > vars/cilium-result.yaml
 
 	# helm template --namespace=kube-system -f vars/talos-cloud-controller-manager.yaml talos-cloud-controller-manager \
@@ -18,8 +18,8 @@ create-deployments: ## create templates
 	helm template --namespace=kube-system -f vars/metrics-server.yaml metrics-server \
 		metrics-server/metrics-server > vars/metrics-server-result.yaml
 
-	# helm template --namespace=local-path-storage -f vars/local-path-storage.yaml local-path-provisioner \
-	# 	~/work/sergelogvinov/local-path-provisioner/deploy/chart/local-path-provisioner > vars/local-path-storage-result.yaml
+	helm template --namespace=local-path-storage -f vars/local-path-storage.yaml local-path-provisioner \
+		~/work/sergelogvinov/local-path-provisioner/deploy/chart/local-path-provisioner > vars/local-path-storage-result.yaml
 
-	helm template --namespace=ingress-nginx --version=4.7.0 -f vars/ingress.yaml ingress-nginx \
+	helm template --namespace=ingress-nginx --version=4.11.1 -f vars/ingress.yaml ingress-nginx \
 		ingress-nginx/ingress-nginx > vars/ingress-result.yaml
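
Note: after bumping the chart versions above, the manifests have to be re-rendered so the vars/*-result.yaml files match. A minimal sketch, assuming the repos from `make helm-repos` are already configured; how the rendered files are then delivered (plain kubectl vs. Talos inline manifests) depends on the rest of the setup:

	helm repo update                          # refresh chart indexes
	make create-deployments                   # re-render vars/*-result.yaml
	kubectl apply -f vars/cilium-result.yaml  # or feed the file into the machine config
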
@@ -32,18 +32,20 @@ data:
   #   the kvstore by commenting out the identity-allocation-mode below, or
   #   setting it to "kvstore".
   identity-allocation-mode: crd
+  identity-heartbeat-timeout: "30m0s"
   identity-gc-interval: "15m0s"
   cilium-endpoint-gc-interval: "5m0s"
   nodes-gc-interval: "5m0s"
-  skip-cnp-status-startup-clean: "false"
   # Disable the usage of CiliumEndpoint CRD
   disable-endpoint-crd: "false"
 
   # If you want to run cilium in debug mode change this value to true
   debug: "false"
+  debug-verbose: ""
   # The agent can be put into the following three policy enforcement modes
   # default, always and never.
-  # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
+  # https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
   enable-policy: "default"
+  policy-cidr-match-mode: ""
   # If you want metrics enabled in all of your Cilium agents, set the port for
   # which the Cilium agents will have their metrics exposed.
   # This option deprecates the "prometheus-serve-addr" in the
@@ -51,6 +53,12 @@ data:
   # NOTE that this will open the port on ALL nodes where Cilium pods are
   # scheduled.
   prometheus-serve-addr: ":9962"
+  # A space-separated list of controller groups for which to enable metrics.
+  # The special values of "all" and "none" are supported.
+  controller-group-metrics:
+    write-cni-file
+    sync-host-ips
+    sync-lb-maps-with-k8s-services
   # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
   # field is not set.
   proxy-prometheus-port: "9964"
@@ -65,7 +73,7 @@ data:
   # Users who wish to specify their own custom CNI configuration file must set
   # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
   custom-cni-conf: "false"
-  enable-bpf-clock-probe: "true"
+  enable-bpf-clock-probe: "false"
   # If you want cilium monitor to aggregate tracing for packets, set this level
   # to "low", "medium", or "maximum". The higher the level, the less packets
   # that will be seen in monitor output.
@@ -75,14 +83,14 @@ data:
   # notification events for each allowed connection.
   #
   # Only effective when monitor aggregation is set to "medium" or higher.
-  monitor-aggregation-interval: 5s
+  monitor-aggregation-interval: "5s"
 
   # The monitor aggregation flags determine which TCP flags which, upon the
   # first observation, cause monitor notifications to be generated.
   #
   # Only effective when monitor aggregation is set to "medium" or higher.
   monitor-aggregation-flags: all
-  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
+  # Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
   # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
   bpf-map-dynamic-size-ratio: "0.0025"
   # bpf-policy-map-max specifies the maximum number of entries in endpoint
@@ -91,8 +99,6 @@ data:
   # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
   # backend and affinity maps.
   bpf-lb-map-max: "65536"
-  # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass
-  # optimization for nodeport reverse NAT handling.
   bpf-lb-external-clusterip: "false"
 
   # Pre-allocation of map entries allows per-packet latency to be reduced, at
@@ -127,16 +133,23 @@ data:
   #   - disabled
   #   - vxlan (default)
   #   - geneve
-  tunnel: "vxlan"
   # Default case
+  routing-mode: "tunnel"
+  tunnel-protocol: "vxlan"
+  service-no-backend-response: "reject"
 
 
   # Enables L7 proxy for L7 policy enforcement and visibility
   enable-l7-proxy: "true"
 
   enable-ipv4-masquerade: "true"
+  enable-ipv4-big-tcp: "false"
+  enable-ipv6-big-tcp: "false"
   enable-ipv6-masquerade: "true"
   enable-bpf-masquerade: "false"
+  enable-masquerade-to-route-source: "false"
 
   enable-xt-socket-fallback: "true"
   install-iptables-rules: "true"
   install-no-conntrack-iptables-rules: "false"
 
   auto-direct-node-routes: "false"
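
Note: newer Cilium releases split the single `tunnel` key into `routing-mode` plus `tunnel-protocol`, which is why the rendered ConfigMap changes shape here; `service-no-backend-response: "reject"` makes traffic to a service with no backends get an explicit rejection instead of a silent drop. A quick way to confirm the rename landed in the rendered file (a sketch, run from the repo root):

	grep -nE '^  (tunnel|routing-mode|tunnel-protocol|service-no-backend-response):' vars/cilium-result.yaml
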
@@ -149,15 +162,21 @@ data:
   kube-proxy-replacement: "strict"
   kube-proxy-replacement-healthz-bind-address: ""
   bpf-lb-sock: "false"
-  host-reachable-services-protos: 
   enable-health-check-nodeport: "true"
+  enable-health-check-loadbalancer-ip: "false"
   node-port-bind-protection: "true"
   enable-auto-protect-node-port-range: "true"
+  bpf-lb-acceleration: "disabled"
   enable-svc-source-range-check: "true"
   enable-l2-neigh-discovery: "true"
   arping-refresh-period: "30s"
   k8s-require-ipv4-pod-cidr: "true"
   k8s-require-ipv6-pod-cidr: "true"
+  enable-k8s-networkpolicy: "true"
+  # Tell the agent to generate and write a CNI configuration file
+  write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
+  cni-exclusive: "true"
+  cni-log-file: "/var/run/cilium/cilium-cni.log"
   enable-endpoint-health-checking: "true"
   enable-health-checking: "true"
   enable-well-known-identities: "false"
@@ -165,7 +184,8 @@ data:
   synchronize-k8s-nodes: "true"
   operator-api-serve-addr: "127.0.0.1:9234"
   ipam: "kubernetes"
-  disable-cnp-status-updates: "true"
+  ipam-cilium-node-update-rate: "15s"
+  egress-gateway-reconciliation-trigger-interval: "1s"
   enable-vtep: "false"
   vtep-endpoint: ""
   vtep-cidr: ""
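
Note: `kube-proxy-replacement: "strict"` is carried over from the old rendering; newer Cilium versions fold strict mode into a plain true/false setting, so this value may warrant a follow-up. One way to see what the agent actually runs with (a sketch, assuming the DaemonSet keeps the default `cilium` name):

	kubectl -n kube-system exec ds/cilium -- cilium-dbg status | grep -i kubeproxyreplacement
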
@@ -176,23 +196,49 @@ data:
   bpf-root: "/sys/fs/bpf"
   cgroup-root: "/sys/fs/cgroup"
   enable-k8s-terminating-endpoint: "true"
   enable-sctp: "false"
 
+  k8s-client-qps: "10"
+  k8s-client-burst: "20"
   remove-cilium-node-taints: "true"
+  set-cilium-node-taints: "true"
   set-cilium-is-up-condition: "true"
   unmanaged-pod-watcher-interval: "15"
+  # default DNS proxy to transparent mode in non-chaining modes
+  dnsproxy-enable-transparent-mode: "true"
   tofqdns-dns-reject-response-code: "refused"
   tofqdns-enable-dns-compression: "true"
   tofqdns-endpoint-max-ip-per-hostname: "50"
   tofqdns-idle-connection-grace-period: "0s"
   tofqdns-max-deferred-connection-deletes: "10000"
   tofqdns-min-ttl: "3600"
   tofqdns-proxy-response-max-delay: "100ms"
   agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
 
+  mesh-auth-enabled: "true"
+  mesh-auth-queue-size: "1024"
+  mesh-auth-rotated-identities-queue-size: "1024"
+  mesh-auth-gc-interval: "5m0s"
 
+  proxy-xff-num-trusted-hops-ingress: "0"
+  proxy-xff-num-trusted-hops-egress: "0"
+  proxy-connect-timeout: "2"
+  proxy-max-requests-per-connection: "0"
+  proxy-max-connection-duration-seconds: "0"
+  proxy-idle-timeout-seconds: "60"
 
+  external-envoy-proxy: "false"
+  max-connected-clusters: "255"
 
 # Extra config allows adding arbitrary properties to the cilium config.
 # By putting it at the end of the ConfigMap, it's also possible to override existing properties.
 ---
 # Source: cilium/templates/cilium-agent/clusterrole.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cilium
   labels:
     app.kubernetes.io/part-of: cilium
 rules:
 - apiGroups:
   - networking.k8s.io
@@ -236,12 +282,14 @@ rules:
 - apiGroups:
   - cilium.io
   resources:
-  - ciliumbgploadbalancerippools
+  - ciliumloadbalancerippools
   - ciliumbgppeeringpolicies
+  - ciliumbgpnodeconfigs
+  - ciliumbgpadvertisements
+  - ciliumbgppeerconfigs
   - ciliumclusterwideenvoyconfigs
   - ciliumclusterwidenetworkpolicies
   - ciliumegressgatewaypolicies
-  - ciliumegressnatpolicies
   - ciliumendpoints
   - ciliumendpointslices
   - ciliumenvoyconfigs
@@ -249,6 +297,10 @@ rules:
   - ciliumlocalredirectpolicies
   - ciliumnetworkpolicies
   - ciliumnodes
+  - ciliumnodeconfigs
+  - ciliumcidrgroups
+  - ciliuml2announcementpolicies
+  - ciliumpodippools
   verbs:
   - list
   - watch
@@ -289,6 +341,8 @@ rules:
   - ciliumclusterwidenetworkpolicies/status
   - ciliumendpoints/status
   - ciliumendpoints
+  - ciliuml2announcementpolicies/status
+  - ciliumbgpnodeconfigs/status
   verbs:
   - patch
 ---
@@ -297,6 +351,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cilium-operator
+  labels:
+    app.kubernetes.io/part-of: cilium
 rules:
 - apiGroups:
   - ""
@@ -340,6 +396,7 @@ rules:
   - services/status
   verbs:
   - update
+  - patch
 - apiGroups:
   - ""
   resources:
@@ -423,6 +480,9 @@ rules:
   resources:
   - ciliumendpointslices
   - ciliumenvoyconfigs
+  - ciliumbgppeerconfigs
+  - ciliumbgpadvertisements
+  - ciliumbgpnodeconfigs
   verbs:
   - create
   - update
@@ -430,6 +490,7 @@ rules:
   - list
   - watch
   - delete
+  - patch
 - apiGroups:
   - apiextensions.k8s.io
   resources:
@@ -446,12 +507,16 @@ rules:
   verbs:
   - update
   resourceNames:
-  - ciliumbgploadbalancerippools.cilium.io
+  - ciliumloadbalancerippools.cilium.io
   - ciliumbgppeeringpolicies.cilium.io
+  - ciliumbgpclusterconfigs.cilium.io
+  - ciliumbgppeerconfigs.cilium.io
+  - ciliumbgpadvertisements.cilium.io
+  - ciliumbgpnodeconfigs.cilium.io
+  - ciliumbgpnodeconfigoverrides.cilium.io
   - ciliumclusterwideenvoyconfigs.cilium.io
   - ciliumclusterwidenetworkpolicies.cilium.io
   - ciliumegressgatewaypolicies.cilium.io
-  - ciliumegressnatpolicies.cilium.io
   - ciliumendpoints.cilium.io
   - ciliumendpointslices.cilium.io
   - ciliumenvoyconfigs.cilium.io
@@ -460,6 +525,33 @@ rules:
   - ciliumlocalredirectpolicies.cilium.io
   - ciliumnetworkpolicies.cilium.io
   - ciliumnodes.cilium.io
+  - ciliumnodeconfigs.cilium.io
+  - ciliumcidrgroups.cilium.io
+  - ciliuml2announcementpolicies.cilium.io
+  - ciliumpodippools.cilium.io
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumloadbalancerippools
+  - ciliumpodippools
+  - ciliumbgpclusterconfigs
+  - ciliumbgpnodeconfigoverrides
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+    - cilium.io
+  resources:
+    - ciliumpodippools
+  verbs:
+    - create
+- apiGroups:
+  - cilium.io
+  resources:
+  - ciliumloadbalancerippools/status
+  verbs:
+  - patch
 # For cilium-operator running in HA mode.
 #
 # Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
@@ -480,6 +572,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: cilium
+  labels:
+    app.kubernetes.io/part-of: cilium
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -494,6 +588,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: cilium-operator
+  labels:
+    app.kubernetes.io/part-of: cilium
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -503,6 +599,41 @@ subjects:
   name: "cilium-operator"
   namespace: kube-system
 ---
+# Source: cilium/templates/cilium-agent/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: cilium-config-agent
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/part-of: cilium
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Source: cilium/templates/cilium-agent/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: cilium-config-agent
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/part-of: cilium
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: cilium-config-agent
+subjects:
+  - kind: ServiceAccount
+    name: "cilium"
+    namespace: kube-system
+---
 # Source: cilium/templates/cilium-agent/service.yaml
 apiVersion: v1
 kind: Service
@@ -514,6 +645,8 @@ metadata:
     prometheus.io/port: "9964"
   labels:
     k8s-app: cilium
+    app.kubernetes.io/name: cilium-agent
+    app.kubernetes.io/part-of: cilium
 spec:
   clusterIP: None
   type: ClusterIP
@@ -533,6 +666,8 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: cilium
+    app.kubernetes.io/part-of: cilium
+    app.kubernetes.io/name: cilium-agent
 spec:
   selector:
     matchLabels:
@@ -548,10 +683,15 @@ spec:
         prometheus.io/scrape: "true"
       labels:
         k8s-app: cilium
+        app.kubernetes.io/name: cilium-agent
+        app.kubernetes.io/part-of: cilium
     spec:
+      securityContext:
+        appArmorProfile:
+          type: Unconfined
       containers:
       - name: cilium-agent
-        image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
+        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
         imagePullPolicy: IfNotPresent
         command:
         - cilium-agent
@@ -569,6 +709,7 @@ spec:
           failureThreshold: 105
           periodSeconds: 2
           successThreshold: 1
+          initialDelaySeconds: 5
         livenessProbe:
           httpGet:
             host: "127.0.0.1"
@@ -608,18 +749,11 @@ spec:
               fieldPath: metadata.namespace
         - name: CILIUM_CLUSTERMESH_CONFIG
           value: /var/lib/cilium/clustermesh/
-        - name: CILIUM_CNI_CHAINING_MODE
+        - name: GOMEMLIMIT
           valueFrom:
-            configMapKeyRef:
-              name: cilium-config
-              key: cni-chaining-mode
-              optional: true
-        - name: CILIUM_CUSTOM_CNI_CONF
-          valueFrom:
-            configMapKeyRef:
-              name: cilium-config
-              key: custom-cni-conf
-              optional: true
+            resourceFieldRef:
+              resource: limits.memory
+              divisor: '1'
         - name: KUBERNETES_SERVICE_HOST
           value: "api.cluster.local"
         - name: KUBERNETES_SERVICE_PORT
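
Note: the CNI chaining env vars disappear here (the new `config` init container below resolves the full agent configuration instead), and `GOMEMLIMIT` is now derived from the container's memory limit via the downward API; with `divisor: '1'` the value is plain bytes, and when no limit is set the kubelet falls back to the node's allocatable memory. A quick sanity check inside a running agent (a sketch):

	kubectl -n kube-system exec ds/cilium -- sh -c 'echo GOMEMLIMIT=$GOMEMLIMIT'
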
@@ -628,10 +762,29 @@ spec:
           postStart:
             exec:
               command:
-              - "/cni-install.sh"
-              - "--enable-debug=false"
-              - "--cni-exclusive=true"
-              - "--log-file=/var/run/cilium/cilium-cni.log"
+              - "bash"
+              - "-c"
+              - |
+                set -o errexit
+                set -o pipefail
+                set -o nounset
+
+                # When running in AWS ENI mode, it's likely that 'aws-node' has
+                # had a chance to install SNAT iptables rules. These can result
+                # in dropped traffic, so we should attempt to remove them.
+                # We do it using a 'postStart' hook since this may need to run
+                # for nodes which might have already been init'ed but may still
+                # have dangling rules. This is safe because there are no
+                # dependencies on anything that is part of the startup script
+                # itself, and can be safely run multiple times per node (e.g. in
+                # case of a restart).
+                if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
+                then
+                    echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
+                    iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
+                fi
+                echo 'Done!'
+
           preStop:
             exec:
               command:
@@ -668,25 +821,73 @@ spec:
           mountPath: /sys/fs/cgroup
         - name: cilium-run
           mountPath: /var/run/cilium
-        - name: cni-path
-          mountPath: /host/opt/cni/bin
         - name: etc-cni-netd
          mountPath: /host/etc/cni/net.d
         - name: clustermesh-secrets
           mountPath: /var/lib/cilium/clustermesh
           readOnly: true
-        - name: cilium-config-path
-          mountPath: /tmp/cilium/config-map
-          readOnly: true
           # Needed to be able to load kernel modules
         - name: lib-modules
           mountPath: /lib/modules
           readOnly: true
         - name: xtables-lock
           mountPath: /run/xtables.lock
+        - name: tmp
+          mountPath: /tmp
       initContainers:
+      - name: config
+        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
+        imagePullPolicy: IfNotPresent
+        command:
+        - cilium-dbg
+        - build-config
+        env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_K8S_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: KUBERNETES_SERVICE_HOST
+          value: "api.cluster.local"
+        - name: KUBERNETES_SERVICE_PORT
+          value: "6443"
+        volumeMounts:
+        - name: tmp
+          mountPath: /tmp
+        terminationMessagePolicy: FallbackToLogsOnError
+      - name: apply-sysctl-overwrites
+        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: BIN_PATH
+          value: /opt/cni/bin
+        command:
+        - sh
+        - -ec
+        # The statically linked Go program binary is invoked to avoid any
+        # dependency on utilities like sh that can be missing on certain
+        # distros installed on the underlying host. Copy the binary to the
+        # same directory where we install cilium cni plugin so that exec permissions
+        # are available.
+        - |
+          cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
+          nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
+          rm /hostbin/cilium-sysctlfix
+        volumeMounts:
+        - name: hostproc
+          mountPath: /hostproc
+        - name: cni-path
+          mountPath: /hostbin
+        terminationMessagePolicy: FallbackToLogsOnError
+        securityContext:
+          privileged: true
       - name: clean-cilium-state
-        image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
+        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
         imagePullPolicy: IfNotPresent
         command:
         - /init-container.sh
@@ -703,6 +904,12 @@ spec:
               name: cilium-config
               key: clean-cilium-bpf-state
               optional: true
+        - name: WRITE_CNI_CONF_WHEN_READY
+          valueFrom:
+            configMapKeyRef:
+              name: cilium-config
+              key: write-cni-conf-when-ready
+              optional: true
         - name: KUBERNETES_SERVICE_HOST
           value: "api.cluster.local"
         - name: KUBERNETES_SERVICE_PORT
@@ -718,15 +925,31 @@ spec:
           mountPath: /sys/fs/cgroup
           mountPropagation: HostToContainer
         - name: cilium-run
-          mountPath: /var/run/cilium
+          mountPath: /var/run/cilium # wait-for-kube-proxy
+      # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
+      - name: install-cni-binaries
+        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
+        imagePullPolicy: IfNotPresent
+        command:
+          - "/install-plugin.sh"
+        resources:
+          requests:
+            cpu: 100m
-            memory: 100Mi # wait-for-kube-proxy
+            memory: 10Mi
+        securityContext:
+          privileged: true
+          capabilities:
+            drop:
+              - ALL
+        terminationMessagePolicy: FallbackToLogsOnError
+        volumeMounts:
+          - name: cni-path
+            mountPath: /host/opt/cni/bin # .Values.cni.install
       restartPolicy: Always
       priorityClassName: system-node-critical
       serviceAccount: "cilium"
       serviceAccountName: "cilium"
+      automountServiceAccountToken: true
       terminationGracePeriodSeconds: 1
       hostNetwork: true
       affinity:
@@ -741,6 +964,9 @@ spec:
       tolerations:
         - operator: Exists
       volumes:
+        # For sharing configuration between the "config" initContainer and the agent
+      - name: tmp
+        emptyDir: {}
         # To keep state between restarts / upgrades
       - name: cilium-run
         hostPath:
@@ -751,6 +977,11 @@ spec:
         hostPath:
           path: /sys/fs/bpf
           type: DirectoryOrCreate
+      # To mount cgroup2 filesystem on the host or apply sysctlfix
+      - name: hostproc
+        hostPath:
+          path: /proc
+          type: Directory
       # To keep state between restarts / upgrades for cgroup2 filesystem
       - name: cilium-cgroup
         hostPath:
@@ -777,15 +1008,27 @@ spec:
           type: FileOrCreate
         # To read the clustermesh configuration
       - name: clustermesh-secrets
-        secret:
-          secretName: cilium-clustermesh
+        projected:
+          # note: the leading zero means this number is in octal representation: do not remove it
+          defaultMode: 0400
+          sources:
+          - secret:
+              name: cilium-clustermesh
+              optional: true
+              # note: items are not explicitly listed here, since the entries of this secret
+              # depend on the peers configured, and that would cause a restart of all agents
+              # at every addition/removal. Leaving the field empty makes each secret entry
+              # to be automatically projected into the volume as a file whose name is the key.
+          - secret:
+              name: clustermesh-apiserver-remote-cert
+              optional: true
+              items:
+              - key: tls.key
+                path: common-etcd-client.key
+              - key: tls.crt
+                path: common-etcd-client.crt
+              - key: ca.crt
+                path: common-etcd-client-ca.crt
         # To read the configuration from the config map
       - name: cilium-config-path
         configMap:
           name: cilium-config
 ---
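
Note: switching `clustermesh-secrets` from a plain secret to a projected volume lets both sources stay `optional: true`, so agents start cleanly on clusters without Cluster Mesh and pick up peer credentials when they appear. To see whether either source exists (a sketch):

	kubectl -n kube-system get secret cilium-clustermesh clustermesh-apiserver-remote-cert --ignore-not-found
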
 # Source: cilium/templates/cilium-operator/deployment.yaml
 apiVersion: apps/v1
@@ -796,6 +1039,8 @@ metadata:
   labels:
     io.cilium/app: operator
     name: cilium-operator
+    app.kubernetes.io/part-of: cilium
+    app.kubernetes.io/name: cilium-operator
 spec:
   # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
   # for more details.
@@ -804,23 +1049,29 @@ spec:
     matchLabels:
       io.cilium/app: operator
       name: cilium-operator
+  # ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case
+  # of one replica and no user configured Recreate strategy.
+  # otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the
+  # podAntiAffinity which prevents deployments of multiple operator replicas on the same node.
   strategy:
     rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
+      maxSurge: 25%
+      maxUnavailable: 100%
     type: RollingUpdate
   template:
     metadata:
       annotations:
         # ensure pods roll when configmap updates
-        cilium.io/cilium-configmap-checksum: "93ed3047796c548140dd014145d2cb313155de38c36595eb2f05f60856400ae5"
+        cilium.io/cilium-configmap-checksum: "fc9863bdf15518f8321ffc2278b755f844cf8067e1bb8a7e7c3ebc7cdf5f0e2f"
       labels:
         io.cilium/app: operator
         name: cilium-operator
+        app.kubernetes.io/part-of: cilium
+        app.kubernetes.io/name: cilium-operator
     spec:
       containers:
       - name: cilium-operator
-        image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
+        image: "quay.io/cilium/operator-generic:v1.15.7@sha256:6840a6dde703b3e73dd31e03390327a9184fcb888efbad9d9d098d65b9035b54"
         imagePullPolicy: IfNotPresent
         command:
         - cilium-operator-generic
@@ -857,6 +1108,16 @@ spec:
           initialDelaySeconds: 60
           periodSeconds: 10
           timeoutSeconds: 3
+        readinessProbe:
+          httpGet:
+            host: "127.0.0.1"
+            path: /healthz
+            port: 9234
+            scheme: HTTP
+          initialDelaySeconds: 0
+          periodSeconds: 5
+          timeoutSeconds: 3
+          failureThreshold: 5
         volumeMounts:
         - name: cilium-config-path
           mountPath: /tmp/cilium/config-map
@@ -867,6 +1128,7 @@ spec:
       priorityClassName: system-cluster-critical
       serviceAccount: "cilium-operator"
       serviceAccountName: "cilium-operator"
+      automountServiceAccountToken: true
       # In HA mode, cilium-operator pods must not be scheduled on the same
       # node as they will clash with each other.
       affinity:
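
Note: the new rolling-update strategy (`maxUnavailable: 100%`) exists for single-replica operators on small clusters, as the added comment explains: with the default 50% plus the operator's podAntiAffinity, a one-node cluster could never schedule the surge pod. After applying, the rollout can be watched with (a sketch):

	kubectl -n kube-system rollout status deployment/cilium-operator
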
@@ -4,10 +4,10 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: controller
@@ -20,17 +20,17 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: controller
   name: ingress-nginx-controller
   namespace: ingress-nginx
 data:
-  allow-snippet-annotations: "true"
+  allow-snippet-annotations: "false"
   client-body-timeout: "30"
   client-header-timeout: "30"
   enable-access-log-for-default-backend: "true"
@@ -66,10 +66,10 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
   name: ingress-nginx
@@ -150,10 +150,10 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
   name: ingress-nginx
@@ -164,17 +164,17 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: ingress-nginx
-    namespace: "ingress-nginx"
+    namespace: ingress-nginx
 ---
 # Source: ingress-nginx/templates/controller-role.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: controller
@@ -214,6 +214,7 @@ rules:
       - get
       - list
       - watch
+  # Omit Ingress status permissions if `--update-status` is disabled.
   - apiGroups:
       - networking.k8s.io
     resources:
@@ -264,10 +265,10 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: controller
@@ -280,7 +281,7 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: ingress-nginx
-    namespace: "ingress-nginx"
+    namespace: ingress-nginx
 ---
 # Source: ingress-nginx/templates/controller-service.yaml
 apiVersion: v1
@@ -288,10 +289,10 @@ kind: Service
 metadata:
   annotations:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: controller
@@ -325,10 +326,10 @@ apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: controller
@@ -352,10 +353,10 @@ spec:
         prometheus.io/port: "10254"
         prometheus.io/scrape: "true"
       labels:
-        helm.sh/chart: ingress-nginx-4.7.0
+        helm.sh/chart: ingress-nginx-4.11.1
         app.kubernetes.io/name: ingress-nginx
         app.kubernetes.io/instance: ingress-nginx
-        app.kubernetes.io/version: "1.8.0"
+        app.kubernetes.io/version: "1.11.1"
         app.kubernetes.io/part-of: ingress-nginx
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/component: controller
@@ -363,7 +364,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: controller
-          image: "registry.k8s.io/ingress-nginx/controller:v1.8.0@sha256:744ae2afd433a395eeb13dc03d3313facba92e96ad71d9feaafc85925493fee3"
+          image: registry.k8s.io/ingress-nginx/controller:v1.11.1@sha256:e6439a12b52076965928e83b7b56aae6731231677b01e81818bce7fa5c60161a
           imagePullPolicy: IfNotPresent
           lifecycle: 
            preStop:
@@ -376,17 +377,22 @@ spec:
             - --controller-class=k8s.io/ingress-nginx
             - --ingress-class=nginx
             - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
+            - --enable-metrics=false
             - --enable-topology-aware-routing=true
+            - --post-shutdown-grace-period=30
             - --report-node-internal-ip-address=true
           securityContext: 
+            runAsNonRoot: true
             runAsUser: 101
+            allowPrivilegeEscalation: false
+            seccompProfile: 
+              type: RuntimeDefault
             capabilities:
               drop:
               - ALL
               add:
               - NET_BIND_SERVICE
-            runAsUser: 101
-            allowPrivilegeEscalation: true
+            readOnlyRootFilesystem: false
           env:
            - name: POD_NAME
              valueFrom:
@@ -452,19 +458,21 @@ spec:
       terminationGracePeriodSeconds: 300
 ---
 # Source: ingress-nginx/templates/controller-ingressclass.yaml
-# We don't support namespaced ingressClass yet
-# So a ClusterRole and a ClusterRoleBinding is required
 apiVersion: networking.k8s.io/v1
 kind: IngressClass
 metadata:
   labels:
-    helm.sh/chart: ingress-nginx-4.7.0
+    helm.sh/chart: ingress-nginx-4.11.1
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/instance: ingress-nginx
-    app.kubernetes.io/version: "1.8.0"
+    app.kubernetes.io/version: "1.11.1"
     app.kubernetes.io/part-of: ingress-nginx
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: controller
   name: nginx
 spec:
   controller: k8s.io/ingress-nginx
 ---
 # Source: ingress-nginx/templates/controller-poddisruptionbudget.yaml
+# PDB is not supported for DaemonSets.
+# https://github.com/kubernetes/kubernetes/issues/108124
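
Note: `allow-snippet-annotations` flips to "false", matching the hardened default of newer ingress-nginx releases (snippet annotations have been a recurring injection vector), so any Ingress objects that rely on `nginx.ingress.kubernetes.io/*-snippet` annotations stop working until this is re-enabled deliberately. Verifying the running controller version after the upgrade (a sketch, assuming the default controller DaemonSet name):

	kubectl -n ingress-nginx exec ds/ingress-nginx-controller -- /nginx-ingress-controller --version
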
@@ -7,9 +7,9 @@ metadata:
   namespace: local-path-storage
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
 imagePullSecrets:
 ---
@@ -21,9 +21,9 @@ metadata:
   namespace: local-path-storage
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
 data:
   config.json: |-
@@ -68,9 +68,9 @@ metadata:
   name: local-path
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
   annotations:
     storageclass.kubernetes.io/is-default-class: "true"
@@ -87,9 +87,9 @@ metadata:
   name: local-path-provisioner
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
 rules:
   - apiGroups: [""]
@@ -112,9 +112,9 @@ metadata:
   name: local-path-provisioner
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -133,9 +133,9 @@ metadata:
   namespace: local-path-storage
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
 rules:
   - apiGroups: [""]
@@ -150,9 +150,9 @@ metadata:
   namespace: local-path-storage
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -171,27 +171,21 @@ metadata:
   namespace: local-path-storage
   labels:
     app.kubernetes.io/name: local-path-provisioner
-    helm.sh/chart: local-path-provisioner-0.0.25-dev
+    helm.sh/chart: local-path-provisioner-0.0.25
     app.kubernetes.io/instance: local-path-provisioner
-    app.kubernetes.io/version: "v0.0.25-dev"
+    app.kubernetes.io/version: "v0.0.25"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
   selector:
     matchLabels:
       app.kubernetes.io/name: local-path-provisioner
-      helm.sh/chart: local-path-provisioner-0.0.25-dev
       app.kubernetes.io/instance: local-path-provisioner
-      app.kubernetes.io/version: "v0.0.25-dev"
-      app.kubernetes.io/managed-by: Helm
   template:
     metadata:
       labels:
         app.kubernetes.io/name: local-path-provisioner
-        helm.sh/chart: local-path-provisioner-0.0.25-dev
         app.kubernetes.io/instance: local-path-provisioner
-        app.kubernetes.io/version: "v0.0.25-dev"
-        app.kubernetes.io/managed-by: Helm
     spec:
       serviceAccountName: local-path-provisioner
       securityContext:
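
Note: the last hunk removes `helm.sh/chart`, `app.kubernetes.io/version`, and `app.kubernetes.io/managed-by` from the Deployment's `spec.selector.matchLabels`. Deployment selectors are immutable, so `kubectl apply` over an existing release will likely fail with a "field is immutable" error; the usual workaround is to delete and recreate (a sketch, disruptive):

	kubectl -n local-path-storage delete deployment local-path-provisioner
	kubectl apply -f vars/local-path-storage-result.yaml
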
@@ -6,10 +6,10 @@ metadata:
   name: metrics-server
   namespace: kube-system
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 ---
 # Source: metrics-server/templates/clusterrole-aggregated-reader.yaml
@@ -18,10 +18,10 @@ kind: ClusterRole
 metadata:
   name: system:metrics-server-aggregated-reader
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
     rbac.authorization.k8s.io/aggregate-to-admin: "true"
     rbac.authorization.k8s.io/aggregate-to-edit: "true"
@@ -43,10 +43,10 @@ kind: ClusterRole
 metadata:
   name: system:metrics-server
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 rules:
   - apiGroups:
@@ -73,10 +73,10 @@ kind: ClusterRoleBinding
 metadata:
   name: metrics-server:system:auth-delegator
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -93,10 +93,10 @@ kind: ClusterRoleBinding
 metadata:
   name: system:metrics-server
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -114,10 +114,10 @@ metadata:
   name: metrics-server-auth-reader
   namespace: kube-system
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -135,10 +135,10 @@ metadata:
   name: metrics-server
   namespace: kube-system
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 spec:
   type: ClusterIP
@@ -158,10 +158,10 @@ metadata:
   name: metrics-server
   namespace: kube-system
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 spec:
   replicas: 1
@@ -196,10 +196,10 @@ spec:
             runAsUser: 1000
             seccompProfile:
               type: RuntimeDefault
-          image: registry.k8s.io/metrics-server/metrics-server:v0.6.3
+          image: registry.k8s.io/metrics-server/metrics-server:v0.7.1
           imagePullPolicy: IfNotPresent
           args:
-            - --secure-port=4443
+            - --secure-port=10250
            - --cert-dir=/tmp
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
@@ -207,7 +207,7 @@ spec:
           ports:
           - name: https
             protocol: TCP
-            containerPort: 4443
+            containerPort: 10250
           livenessProbe:
             failureThreshold: 3
             httpGet:
@@ -249,10 +249,10 @@ kind: APIService
 metadata:
   name: v1beta1.metrics.k8s.io
   labels:
-    helm.sh/chart: metrics-server-3.9.0
+    helm.sh/chart: metrics-server-3.12.1
     app.kubernetes.io/name: metrics-server
     app.kubernetes.io/instance: metrics-server
-    app.kubernetes.io/version: "0.6.3"
+    app.kubernetes.io/version: "0.7.1"
     app.kubernetes.io/managed-by: Helm
 spec:
   group: metrics.k8s.io
@@ -19,8 +19,8 @@ create-config: ## Generate talos configs
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}

create-templates:
	@echo 'podSubnets: "10.80.0.0/12,fd40:10:80::/64"'         >  _cfgs/tfstate.vars
	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/108"'  >> _cfgs/tfstate.vars
	@echo 'podSubnets: "10.80.0.0/12,fd40:10:80::/96"'         >  _cfgs/tfstate.vars
	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"'  >> _cfgs/tfstate.vars
	@echo 'apiDomain: api.cluster.local'                       >> _cfgs/tfstate.vars
	@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}'       >> _cfgs/tfstate.vars
	@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml       | awk '{ print "clusterName: "$$1}'  >> _cfgs/tfstate.vars
@@ -42,6 +42,9 @@ create-lb: ## Create load balancer
	terraform apply -auto-approve -target=output.controlplane_endpoint
	terraform refresh

create-infrastructure: ## Bootstrap all nodes
	terraform apply

bootstrap: ## Bootstrap controlplane
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
	talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} bootstrap
@@ -54,8 +57,9 @@ kubeconfig: ## Download kubeconfig
	kubectl --kubeconfig=kubeconfig config set clusters.${CLUSTERNAME}.server https://[${ENDPOINT}]:6443
	kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system

create-infrastructure: ## Bootstrap all nodes
	terraform apply
system-static:
	helm template --namespace=kube-system --version=1.15.7 -f deployments/cilium.yaml \
		cilium cilium/cilium > deployments/cilium-result.yaml

system:
	helm --kubeconfig=kubeconfig upgrade -i --namespace=kube-system --version=1.15.7 -f deployments/cilium.yaml \

@@ -9,21 +9,22 @@ Local utilities

## Kubernetes addons

* [cilium](https://github.com/cilium/cilium) 1.11.1
* [kubelet-serving-cert-approver](https://github.com/alex1989hu/kubelet-serving-cert-approver)
* [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 0.5.0
* [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.19
* [cilium](https://github.com/cilium/cilium) 1.15.7
* [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 3.12.1
* [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.26
* [talos CCM](https://github.com/siderolabs/talos-cloud-controller-manager) edge, controller: `cloud-node`.
* [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) 4.11.1

## Prepare the base image

Use [packer](../system_os/scaleway) to upload the Talos image.
Use [packer](images/) to upload the Talos image.

## Install control plane

Generate the default Talos config

```shell
make create-lb create-config create-templates
make create-config create-templates
```
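
Optionally, sanity-check the generated machine config before applying it (a suggested extra step, not one of the Makefile targets; `cloud` is the validation mode for cloud-hosted nodes):

```shell
talosctl validate --config _cfgs/controlplane.yaml --mode cloud
```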

Open the config file **terraform.tfvars** and add the parameters
@@ -32,25 +33,30 @@ Open the config file **terraform.tfvars** and add the parameters
# counts and type of kubernetes master nodes
controlplane = {
    count = 1,
    type  = "DEV1-S"
    type  = "COPARM1-2C-8G"
}

instances = {
    "all" = {
      version = "v1.30.2"
    },
    "fr-par-2" = {
      web_count    = 1,
  web_instance_type    = "DEV1-S",
      web_type     = "COPARM1-2C-8G",
      worker_count = 1,
  worker_instance_type = "DEV1-S",
      worker_type  = "COPARM1-2C-8G",
    },
}
```

And deploy the Kubernetes master nodes

```shell
make create-controlplane
```

Then deploy all other instances
Bootstrap all the infrastructure

```shell
make create-infrastructure

# see terraform output: controlplane_config
talosctl apply-config --insecure --nodes $IP --config-patch @_cfgs/controlplane-1.yaml --file _cfgs/controlplane.yaml

make bootstrap
make system
```
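
After the bootstrap, verify that the control plane answers and the nodes become Ready (a quick optional check; it assumes the kubeconfig was fetched with `make kubeconfig`, with `${ENDPOINT}` standing for the controlplane address used above):

```shell
talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} health
kubectl --kubeconfig=kubeconfig get nodes -o wide
```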
@@ -32,17 +32,20 @@ data:
  #   the kvstore by commenting out the identity-allocation-mode below, or
  #   setting it to "kvstore".
  identity-allocation-mode: crd
  identity-heartbeat-timeout: "30m0s"
  identity-gc-interval: "15m0s"
  cilium-endpoint-gc-interval: "5m0s"
  nodes-gc-interval: "5m0s"
  # Disable the usage of CiliumEndpoint CRD
  disable-endpoint-crd: "false"
  skip-cnp-status-startup-clean: "false"

  # If you want to run cilium in debug mode change this value to true
  debug: "false"
  debug-verbose: ""
  # The agent can be put into the following three policy enforcement modes
  # default, always and never.
  # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
  # https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
  enable-policy: "default"
  policy-cidr-match-mode: ""
  # If you want metrics enabled in all of your Cilium agents, set the port for
  # which the Cilium agents will have their metrics exposed.
  # This option deprecates the "prometheus-serve-addr" in the
@@ -50,6 +53,12 @@ data:
  # NOTE that this will open the port on ALL nodes where Cilium pods are
  # scheduled.
  prometheus-serve-addr: ":9962"
  # A space-separated list of controller groups for which to enable metrics.
  # The special values of "all" and "none" are supported.
  controller-group-metrics:
    write-cni-file
    sync-host-ips
    sync-lb-maps-with-k8s-services
  # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
  # field is not set.
  proxy-prometheus-port: "9964"
@@ -64,7 +73,7 @@ data:
  # Users who wish to specify their own custom CNI configuration file must set
  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
  custom-cni-conf: "false"
  enable-bpf-clock-probe: "true"
  enable-bpf-clock-probe: "false"
  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the less packets
  # that will be seen in monitor output.
@@ -74,14 +83,14 @@ data:
  # notification events for each allowed connection.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-interval: 5s
  monitor-aggregation-interval: "5s"

  # The monitor aggregation flags determine which TCP flags which, upon the
  # first observation, cause monitor notifications to be generated.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-flags: all
  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
  # Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
  bpf-map-dynamic-size-ratio: "0.0025"
  # bpf-policy-map-max specifies the maximum number of entries in endpoint
@@ -90,8 +99,6 @@ data:
  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
  # backend and affinity maps.
  bpf-lb-map-max: "65536"
  # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass
  # optimization for nodeport reverse NAT handling.
  bpf-lb-external-clusterip: "false"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
@@ -126,16 +133,23 @@ data:
  #   - disabled
  #   - vxlan (default)
  #   - geneve
  tunnel: "vxlan"
  # Default case
  routing-mode: "tunnel"
  tunnel-protocol: "vxlan"
  service-no-backend-response: "reject"


  # Enables L7 proxy for L7 policy enforcement and visibility
  enable-l7-proxy: "true"

  enable-ipv4-masquerade: "true"
  enable-ipv4-big-tcp: "false"
  enable-ipv6-big-tcp: "false"
  enable-ipv6-masquerade: "true"
  enable-bpf-masquerade: "false"
  enable-masquerade-to-route-source: "false"

  enable-xt-socket-fallback: "true"
  install-iptables-rules: "true"
  install-no-conntrack-iptables-rules: "false"

  auto-direct-node-routes: "false"
@@ -148,15 +162,21 @@ data:
  kube-proxy-replacement: "strict"
  kube-proxy-replacement-healthz-bind-address: ""
  bpf-lb-sock: "false"
  host-reachable-services-protos:
  enable-health-check-nodeport: "true"
  enable-health-check-loadbalancer-ip: "false"
  node-port-bind-protection: "true"
  enable-auto-protect-node-port-range: "true"
  bpf-lb-acceleration: "disabled"
  enable-svc-source-range-check: "true"
  enable-l2-neigh-discovery: "true"
  arping-refresh-period: "30s"
  k8s-require-ipv4-pod-cidr: "true"
  k8s-require-ipv6-pod-cidr: "true"
  enable-k8s-networkpolicy: "true"
  # Tell the agent to generate and write a CNI configuration file
  write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
  cni-exclusive: "true"
  cni-log-file: "/var/run/cilium/cilium-cni.log"
  enable-endpoint-health-checking: "true"
  enable-health-checking: "true"
  enable-well-known-identities: "false"
@@ -164,7 +184,8 @@ data:
  synchronize-k8s-nodes: "true"
  operator-api-serve-addr: "127.0.0.1:9234"
  ipam: "kubernetes"
  disable-cnp-status-updates: "true"
  ipam-cilium-node-update-rate: "15s"
  egress-gateway-reconciliation-trigger-interval: "1s"
  enable-vtep: "false"
  vtep-endpoint: ""
  vtep-cidr: ""
@@ -175,23 +196,49 @@ data:
  bpf-root: "/sys/fs/bpf"
  cgroup-root: "/sys/fs/cgroup"
  enable-k8s-terminating-endpoint: "true"
  enable-sctp: "false"

  k8s-client-qps: "10"
  k8s-client-burst: "20"
  remove-cilium-node-taints: "true"
  set-cilium-node-taints: "true"
  set-cilium-is-up-condition: "true"
  unmanaged-pod-watcher-interval: "15"
  # default DNS proxy to transparent mode in non-chaining modes
  dnsproxy-enable-transparent-mode: "true"
  tofqdns-dns-reject-response-code: "refused"
  tofqdns-enable-dns-compression: "true"
  tofqdns-endpoint-max-ip-per-hostname: "50"
  tofqdns-idle-connection-grace-period: "0s"
  tofqdns-max-deferred-connection-deletes: "10000"
  tofqdns-min-ttl: "3600"
  tofqdns-proxy-response-max-delay: "100ms"
  agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"

  mesh-auth-enabled: "true"
  mesh-auth-queue-size: "1024"
  mesh-auth-rotated-identities-queue-size: "1024"
  mesh-auth-gc-interval: "5m0s"

  proxy-xff-num-trusted-hops-ingress: "0"
  proxy-xff-num-trusted-hops-egress: "0"
  proxy-connect-timeout: "2"
  proxy-max-requests-per-connection: "0"
  proxy-max-connection-duration-seconds: "0"
  proxy-idle-timeout-seconds: "60"

  external-envoy-proxy: "false"
  max-connected-clusters: "255"

# Extra config allows adding arbitrary properties to the cilium config.
# By putting it at the end of the ConfigMap, it's also possible to override existing properties.
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium
  labels:
    app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
  - networking.k8s.io
@@ -235,12 +282,14 @@ rules:
- apiGroups:
  - cilium.io
  resources:
  - ciliumbgploadbalancerippools
  - ciliumloadbalancerippools
  - ciliumbgppeeringpolicies
  - ciliumbgpnodeconfigs
  - ciliumbgpadvertisements
  - ciliumbgppeerconfigs
  - ciliumclusterwideenvoyconfigs
  - ciliumclusterwidenetworkpolicies
  - ciliumegressgatewaypolicies
  - ciliumegressnatpolicies
  - ciliumendpoints
  - ciliumendpointslices
  - ciliumenvoyconfigs
@@ -248,6 +297,10 @@ rules:
  - ciliumlocalredirectpolicies
  - ciliumnetworkpolicies
  - ciliumnodes
  - ciliumnodeconfigs
  - ciliumcidrgroups
  - ciliuml2announcementpolicies
  - ciliumpodippools
  verbs:
  - list
  - watch
@@ -288,6 +341,8 @@ rules:
  - ciliumclusterwidenetworkpolicies/status
  - ciliumendpoints/status
  - ciliumendpoints
  - ciliuml2announcementpolicies/status
  - ciliumbgpnodeconfigs/status
  verbs:
  - patch
---
@@ -296,6 +351,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
  labels:
    app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
  - ""
@@ -339,6 +396,7 @@ rules:
  - services/status
  verbs:
  - update
  - patch
- apiGroups:
  - ""
  resources:
@@ -422,6 +480,9 @@ rules:
  resources:
  - ciliumendpointslices
  - ciliumenvoyconfigs
  - ciliumbgppeerconfigs
  - ciliumbgpadvertisements
  - ciliumbgpnodeconfigs
  verbs:
  - create
  - update
@@ -429,6 +490,7 @@ rules:
  - list
  - watch
  - delete
  - patch
- apiGroups:
  - apiextensions.k8s.io
  resources:
@@ -445,12 +507,16 @@ rules:
  verbs:
  - update
  resourceNames:
  - ciliumbgploadbalancerippools.cilium.io
  - ciliumloadbalancerippools.cilium.io
  - ciliumbgppeeringpolicies.cilium.io
  - ciliumbgpclusterconfigs.cilium.io
  - ciliumbgppeerconfigs.cilium.io
  - ciliumbgpadvertisements.cilium.io
  - ciliumbgpnodeconfigs.cilium.io
  - ciliumbgpnodeconfigoverrides.cilium.io
  - ciliumclusterwideenvoyconfigs.cilium.io
  - ciliumclusterwidenetworkpolicies.cilium.io
  - ciliumegressgatewaypolicies.cilium.io
  - ciliumegressnatpolicies.cilium.io
  - ciliumendpoints.cilium.io
  - ciliumendpointslices.cilium.io
  - ciliumenvoyconfigs.cilium.io
@@ -459,6 +525,33 @@ rules:
  - ciliumlocalredirectpolicies.cilium.io
  - ciliumnetworkpolicies.cilium.io
  - ciliumnodes.cilium.io
  - ciliumnodeconfigs.cilium.io
  - ciliumcidrgroups.cilium.io
  - ciliuml2announcementpolicies.cilium.io
  - ciliumpodippools.cilium.io
- apiGroups:
  - cilium.io
  resources:
  - ciliumloadbalancerippools
  - ciliumpodippools
  - ciliumbgpclusterconfigs
  - ciliumbgpnodeconfigoverrides
  verbs:
  - get
  - list
  - watch
- apiGroups:
    - cilium.io
  resources:
    - ciliumpodippools
  verbs:
    - create
- apiGroups:
  - cilium.io
  resources:
  - ciliumloadbalancerippools/status
  verbs:
  - patch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
@@ -479,6 +572,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
@@ -493,6 +588,8 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
@@ -502,6 +599,41 @@ subjects:
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cilium-config-agent
  namespace: kube-system
  labels:
    app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
# Source: cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cilium-config-agent
  namespace: kube-system
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cilium-config-agent
subjects:
  - kind: ServiceAccount
    name: "cilium"
    namespace: kube-system
---
# Source: cilium/templates/cilium-agent/service.yaml
apiVersion: v1
kind: Service
@@ -513,6 +645,8 @@ metadata:
    prometheus.io/port: "9964"
  labels:
    k8s-app: cilium
    app.kubernetes.io/name: cilium-agent
    app.kubernetes.io/part-of: cilium
spec:
  clusterIP: None
  type: ClusterIP
@@ -532,6 +666,8 @@ metadata:
  namespace: kube-system
  labels:
    k8s-app: cilium
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-agent
spec:
  selector:
    matchLabels:
@@ -547,10 +683,15 @@ spec:
        prometheus.io/scrape: "true"
      labels:
        k8s-app: cilium
        app.kubernetes.io/name: cilium-agent
        app.kubernetes.io/part-of: cilium
    spec:
      securityContext:
        appArmorProfile:
          type: Unconfined
      containers:
      - name: cilium-agent
        image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-agent
@@ -568,6 +709,7 @@ spec:
          failureThreshold: 105
          periodSeconds: 2
          successThreshold: 1
          initialDelaySeconds: 5
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
@@ -607,18 +749,11 @@ spec:
              fieldPath: metadata.namespace
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        - name: CILIUM_CNI_CHAINING_MODE
        - name: GOMEMLIMIT
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: cni-chaining-mode
              optional: true
        - name: CILIUM_CUSTOM_CNI_CONF
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: custom-cni-conf
              optional: true
            resourceFieldRef:
              resource: limits.memory
              divisor: '1'
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
@@ -627,10 +762,29 @@ spec:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
              - "--enable-debug=false"
              - "--cni-exclusive=true"
              - "--log-file=/var/run/cilium/cilium-cni.log"
              - "bash"
              - "-c"
              - |
                    set -o errexit
                    set -o pipefail
                    set -o nounset

                    # When running in AWS ENI mode, it's likely that 'aws-node' has
                    # had a chance to install SNAT iptables rules. These can result
                    # in dropped traffic, so we should attempt to remove them.
                    # We do it using a 'postStart' hook since this may need to run
                    # for nodes which might have already been init'ed but may still
                    # have dangling rules. This is safe because there are no
                    # dependencies on anything that is part of the startup script
                    # itself, and can be safely run multiple times per node (e.g. in
                    # case of a restart).
                    if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
                    then
                        echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                        iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
                    fi
                    echo 'Done!'

          preStop:
            exec:
              command:
@@ -667,25 +821,73 @@ spec:
          mountPath: /sys/fs/cgroup
        - name: cilium-run
          mountPath: /var/run/cilium
        - name: cni-path
          mountPath: /host/opt/cni/bin
        - name: etc-cni-netd
          mountPath: /host/etc/cni/net.d
        - name: clustermesh-secrets
          mountPath: /var/lib/cilium/clustermesh
          readOnly: true
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
          # Needed to be able to load kernel modules
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: xtables-lock
          mountPath: /run/xtables.lock
        - name: tmp
          mountPath: /tmp
      initContainers:
      - name: config
        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-dbg
        - build-config
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        volumeMounts:
        - name: tmp
          mountPath: /tmp
        terminationMessagePolicy: FallbackToLogsOnError
      - name: apply-sysctl-overwrites
        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
        imagePullPolicy: IfNotPresent
        env:
        - name: BIN_PATH
          value: /opt/cni/bin
        command:
        - sh
        - -ec
        # The statically linked Go program binary is invoked to avoid any
        # dependency on utilities like sh that can be missing on certain
        # distros installed on the underlying host. Copy the binary to the
        # same directory where we install cilium cni plugin so that exec permissions
        # are available.
        - |
          cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
          nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
          rm /hostbin/cilium-sysctlfix
        volumeMounts:
        - name: hostproc
          mountPath: /hostproc
        - name: cni-path
          mountPath: /hostbin
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          privileged: true
      - name: clean-cilium-state
        image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
        imagePullPolicy: IfNotPresent
        command:
        - /init-container.sh
@@ -702,6 +904,12 @@ spec:
              name: cilium-config
              key: clean-cilium-bpf-state
              optional: true
        - name: WRITE_CNI_CONF_WHEN_READY
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: write-cni-conf-when-ready
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
@@ -717,15 +925,31 @@ spec:
          mountPath: /sys/fs/cgroup
          mountPropagation: HostToContainer
        - name: cilium-run
          mountPath: /var/run/cilium
          mountPath: /var/run/cilium # wait-for-kube-proxy
      # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
      - name: install-cni-binaries
        image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
        imagePullPolicy: IfNotPresent
        command:
          - "/install-plugin.sh"
        resources:
          requests:
            cpu: 100m
            memory: 100Mi # wait-for-kube-proxy
            memory: 10Mi
        securityContext:
          privileged: true
          capabilities:
            drop:
              - ALL
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
          - name: cni-path
            mountPath: /host/opt/cni/bin # .Values.cni.install
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccount: "cilium"
      serviceAccountName: "cilium"
      automountServiceAccountToken: true
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      affinity:
@@ -740,6 +964,9 @@ spec:
      tolerations:
        - operator: Exists
      volumes:
        # For sharing configuration between the "config" initContainer and the agent
      - name: tmp
        emptyDir: {}
        # To keep state between restarts / upgrades
      - name: cilium-run
        hostPath:
@@ -750,6 +977,11 @@ spec:
        hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
      # To mount cgroup2 filesystem on the host or apply sysctlfix
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
      # To keep state between restarts / upgrades for cgroup2 filesystem
      - name: cilium-cgroup
        hostPath:
@@ -776,15 +1008,27 @@ spec:
          type: FileOrCreate
        # To read the clustermesh configuration
      - name: clustermesh-secrets
        secret:
          secretName: cilium-clustermesh
        projected:
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          sources:
          - secret:
              name: cilium-clustermesh
              optional: true
        # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
              # note: items are not explicitly listed here, since the entries of this secret
              # depend on the peers configured, and that would cause a restart of all agents
              # at every addition/removal. Leaving the field empty makes each secret entry
              # to be automatically projected into the volume as a file whose name is the key.
          - secret:
              name: clustermesh-apiserver-remote-cert
              optional: true
              items:
              - key: tls.key
                path: common-etcd-client.key
              - key: tls.crt
                path: common-etcd-client.crt
              - key: ca.crt
                path: common-etcd-client-ca.crt
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
@@ -795,6 +1039,8 @@ metadata:
  labels:
    io.cilium/app: operator
    name: cilium-operator
    app.kubernetes.io/part-of: cilium
    app.kubernetes.io/name: cilium-operator
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
@@ -803,23 +1049,29 @@ spec:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  # ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case
  # of one replica and no user configured Recreate strategy.
  # otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the
  # podAntiAffinity which prevents deployments of multiple operator replicas on the same node.
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
      maxSurge: 25%
      maxUnavailable: 100%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        # ensure pods roll when configmap updates
        cilium.io/cilium-configmap-checksum: "c3ffdb3de5df1007b50c84e0af5ba77bc44d069f56d62d3232573a21084f2f80"
        cilium.io/cilium-configmap-checksum: "fc9863bdf15518f8321ffc2278b755f844cf8067e1bb8a7e7c3ebc7cdf5f0e2f"
      labels:
        io.cilium/app: operator
        name: cilium-operator
        app.kubernetes.io/part-of: cilium
        app.kubernetes.io/name: cilium-operator
    spec:
      containers:
      - name: cilium-operator
        image: "quay.io/cilium/operator-generic:v1.12.4@sha256:071089ec5bca1f556afb8e541d9972a0dfb09d1e25504ae642ced021ecbedbd1"
        image: "quay.io/cilium/operator-generic:v1.15.7@sha256:6840a6dde703b3e73dd31e03390327a9184fcb888efbad9d9d098d65b9035b54"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-operator-generic
@@ -856,6 +1108,16 @@ spec:
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 0
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 5
        volumeMounts:
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
@@ -866,6 +1128,7 @@ spec:
      priorityClassName: system-cluster-critical
      serviceAccount: "cilium-operator"
      serviceAccountName: "cilium-operator"
      automountServiceAccountToken: true
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:

@@ -64,6 +64,7 @@ resource "local_sensitive_file" "controlplane" {
      nodeSubnets = ["${split("/", scaleway_ipam_ip.controlplane_v4[count.index].address)[0]}/32", one(scaleway_vpc_private_network.main.ipv6_subnets).subnet]
      ipv4_local  = scaleway_ipam_ip.controlplane_v4[count.index].address
      ipv4_vip    = local.ipv4_vip
      lbv4        = local.lbv4

      access     = var.scaleway_access
      secret     = var.scaleway_secret
@@ -76,7 +77,7 @@ resource "local_sensitive_file" "controlplane" {
  filename        = "_cfgs/controlplane-${count.index + 1}.yaml"
  file_permission = "0600"

  depends_on = [scaleway_instance_server.controlplane]
  depends_on = [scaleway_instance_server.controlplane, scaleway_ipam_ip.controlplane_v4]
}

locals {

@@ -1,7 +1,7 @@

locals {
  web_prefix = "web"
  web_labels = "node-pool=web"
  web_labels = "project.io/node-pool=web"
}

resource "scaleway_instance_placement_group" "web" {
@@ -15,6 +15,14 @@ resource "scaleway_instance_ip" "web_v6" {
  type  = "routed_ipv6"
}

# resource "scaleway_ipam_ip" "web_v4" {
#   count   = lookup(try(var.instances[var.regions[0]], {}), "web_count", 0)
#   address = cidrhost(local.main_subnet, 21 + count.index)
#   source {
#     private_network_id = scaleway_vpc_private_network.main.id
#   }
# }

resource "scaleway_instance_server" "web" {
  count              = lookup(try(var.instances[var.regions[0]], {}), "web_count", 0)
  name               = "${local.web_prefix}-${count.index + 1}"

@@ -1,7 +1,7 @@

locals {
  worker_prefix = "worker"
  worker_labels = "node-pool=worker"
  worker_labels = "project.io/node-pool=worker"
}

resource "scaleway_instance_ip" "worker_v6" {

@@ -3,7 +3,7 @@ locals {
  lb_enable = lookup(var.controlplane, "type_lb", "") == "" ? false : true

  ipv4_vip = cidrhost(local.main_subnet, 5)
  #   lbv4     = local.lb_enable ? scaleway_lb_ip.lb[0].ip_address : try(scaleway_vpc_public_gateway_ip.main.address, "127.0.0.1")
  lbv4     = local.lb_enable ? scaleway_lb_ip.lb_v4[0].ip_address : "127.0.0.1"
}

resource "scaleway_ipam_ip" "controlplane_vip" {
@@ -13,146 +13,148 @@ resource "scaleway_ipam_ip" "controlplane_vip" {
  }
}

# resource "scaleway_lb_ip" "lb" {
#   count = local.lb_enable ? 1 : 0
# }
resource "scaleway_lb_ip" "lb_v4" {
  count = local.lb_enable ? 1 : 0
}
resource "scaleway_lb_ip" "lb_v6" {
  count   = local.lb_enable ? 1 : 0
  is_ipv6 = true
}

# resource "scaleway_lb" "lb" {
#   count = local.lb_enable ? 1 : 0
#   name  = "controlplane"
#   ip_id = scaleway_lb_ip.lb[0].id
#   type  = lookup(var.controlplane, "type_lb", "LB-S")
#   tags  = concat(var.tags, ["infra"])
resource "scaleway_lb" "lb" {
  count = local.lb_enable ? 1 : 0
  name  = "controlplane"
  type  = lookup(var.controlplane, "type_lb", "LB-S")

#   private_network {
#     private_network_id = scaleway_vpc_private_network.main.id
#     static_config      = [cidrhost(local.main_subnet, 3), cidrhost(local.main_subnet, 4)]
#   }
# }
  ip_ids = [scaleway_lb_ip.lb_v4[0].id, scaleway_lb_ip.lb_v6[0].id]
  private_network {
    private_network_id = scaleway_vpc_private_network.main.id
  }

# resource "scaleway_lb_backend" "api" {
#   count            = local.lb_enable ? 1 : 0
#   lb_id            = scaleway_lb.lb[0].id
#   name             = "api"
#   forward_protocol = "tcp"
#   forward_port     = "6443"
#   server_ips       = [for k in range(0, lookup(var.controlplane, "count", 0)) : cidrhost(local.main_subnet, 11 + k)]
  tags = concat(var.tags, ["infra"])
}

#   health_check_timeout = "5s"
#   health_check_delay   = "30s"
#   health_check_https {
#     uri  = "/readyz"
#     code = 401
#   }
# }
resource "scaleway_lb_backend" "api" {
  count            = local.lb_enable ? 1 : 0
  lb_id            = scaleway_lb.lb[0].id
  name             = "api"
  forward_protocol = "tcp"
  forward_port     = "6443"
  server_ips       = [for k in range(0, lookup(var.controlplane, "count", 0)) : cidrhost(local.main_subnet, 11 + k)]

# resource "scaleway_lb_frontend" "api" {
#   count        = local.lb_enable ? 1 : 0
#   lb_id        = scaleway_lb.lb[0].id
#   backend_id   = scaleway_lb_backend.api[0].id
#   name         = "api"
#   inbound_port = "6443"
  health_check_timeout = "5s"
  health_check_delay   = "30s"
  health_check_https {
    uri  = "/readyz"
    code = 401
  }
}

#   acl {
#     name = "Allow whitelist IPs"
#     action {
#       type = "allow"
#     }
#     match {
#       ip_subnet = var.whitelist_admins
#     }
#   }
# }
resource "scaleway_lb_frontend" "api" {
  count        = local.lb_enable ? 1 : 0
  lb_id        = scaleway_lb.lb[0].id
  backend_id   = scaleway_lb_backend.api[0].id
  name         = "api"
  inbound_port = "6443"

# resource "scaleway_lb_backend" "web" {
#   count            = local.lb_enable ? 1 : 0
#   lb_id            = scaleway_lb.lb[0].id
#   name             = "web"
#   forward_protocol = "tcp"
#   forward_port     = "80"
#   server_ips       = [for k in range(0, lookup(var.instances, "web_count", 0)) : cidrhost(local.main_subnet, 21 + k)]
  acl {
    name = "Allow whitelist IPs"
    action {
      type = "allow"
    }
    match {
      ip_subnet = var.whitelist_admins
    }
  }
  acl {
    name = "Deny all"
    action {
      type = "deny"
    }
    match {
      ip_subnet = ["0.0.0.0/0", "::/0"]
    }
  }
}

#   health_check_timeout = "5s"
#   health_check_delay   = "30s"
#   health_check_http {
#     uri = "/healthz"
#   }
# }
###################

# resource "scaleway_lb_backend" "web_https" {
#   count            = local.lb_enable ? 1 : 0
#   lb_id            = scaleway_lb.lb[0].id
#   name             = "web"
#   forward_protocol = "tcp"
#   forward_port     = "443"
#   server_ips       = [for k in range(0, lookup(var.instances, "web_count", 0)) : cidrhost(local.main_subnet, 21 + k)]
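# Look up the private IPv4 addresses that IPAM has attached to the web
# instances' private NICs, replacing the hard-coded cidrhost() offsets used
# by the commented-out backends above.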
data "scaleway_ipam_ips" "web" {
  count    = lookup(try(var.instances[var.regions[0]], {}), "web_count", 0)
  type     = "ipv4"
  attached = true

#   health_check_timeout = "5s"
#   health_check_delay   = "30s"
#   health_check_https {
#     uri = "/healthz"
#   }
# }
  resource {
    name = scaleway_instance_server.web[count.index].name
    type = "instance_private_nic"
  }
}

# resource "scaleway_lb_frontend" "http" {
#   count        = local.lb_enable ? 1 : 0
#   lb_id        = scaleway_lb.lb[0].id
#   backend_id   = scaleway_lb_backend.web[0].id
#   name         = "http"
#   inbound_port = "80"
resource "scaleway_lb_backend" "http" {
  count            = local.lb_enable ? 1 : 0
  lb_id            = scaleway_lb.lb[0].id
  name             = "http"
  forward_protocol = "http"
  forward_port     = "80"
  proxy_protocol   = "none"
  server_ips       = [for k in data.scaleway_ipam_ips.web : split("/", one(k.ips).address)[0]]

#   acl {
#     name = "Allow controlplane IPs"
#     action {
#       type = "allow"
#     }
#     match {
#       ip_subnet = try(scaleway_instance_ip.controlplane[*].address, "0.0.0.0/0")
#     }
#   }
#   acl {
#     name = "Allow whitelist IPs"
#     action {
#       type = "allow"
#     }
#     match {
#       ip_subnet = concat(var.whitelist_web, var.whitelist_admins)
#     }
#   }
#   acl {
#     name = "Deny all"
#     action {
#       type = "deny"
#     }
#     match {
#       ip_subnet = ["0.0.0.0/0"]
#     }
#   }
# }
  health_check_timeout = "5s"
  health_check_delay   = "30s"
  health_check_http {
    uri = "/healthz"
  }
}

# resource "scaleway_lb_frontend" "https" {
#   count        = local.lb_enable ? 1 : 0
#   lb_id        = scaleway_lb.lb[0].id
#   backend_id   = scaleway_lb_backend.web_https[0].id
#   name         = "https"
#   inbound_port = "443"
resource "scaleway_lb_frontend" "http" {
  count        = local.lb_enable ? 1 : 0
  lb_id        = scaleway_lb.lb[0].id
  backend_id   = scaleway_lb_backend.http[0].id
  name         = "http"
  inbound_port = "80"
}

#   acl {
#     name = "Allow whitelist IPs"
#     action {
#       type = "allow"
#     }
#     match {
#       ip_subnet = concat(var.whitelist_web, var.whitelist_admins)
#     }
#   }
#   acl {
#     name = "Deny all"
#     action {
#       type = "deny"
#     }
#     match {
#       ip_subnet = ["0.0.0.0/0"]
#     }
#   }
# }
###################

resource "scaleway_lb_backend" "https" {
  count            = local.lb_enable ? 1 : 0
  lb_id            = scaleway_lb.lb[0].id
  name             = "https"
  forward_protocol = "tcp"
  forward_port     = "443"
  proxy_protocol   = "none"
  server_ips       = [for k in data.scaleway_ipam_ips.web : split("/", one(k.ips).address)[0]]

  health_check_timeout = "5s"
  health_check_delay   = "15s"
  health_check_https {
    uri = "/healthz"
  }
}

resource "scaleway_lb_frontend" "https" {
  count        = local.lb_enable ? 1 : 0
  lb_id        = scaleway_lb.lb[0].id
  backend_id   = scaleway_lb_backend.https[0].id
  name         = "https"
  inbound_port = "443"

  acl {
 | 
			
		||||
    name = "Allow whitlist IPs"
 | 
			
		||||
    action {
 | 
			
		||||
      type = "allow"
 | 
			
		||||
    }
 | 
			
		||||
    match {
 | 
			
		||||
      ip_subnet = concat(var.whitelist_web, var.whitelist_admins)
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  acl {
 | 
			
		||||
    name = "Deny all"
 | 
			
		||||
    action {
 | 
			
		||||
      type = "deny"
 | 
			
		||||
    }
 | 
			
		||||
    match {
 | 
			
		||||
      ip_subnet = ["0.0.0.0/0"]
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
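
Load-balancer ACLs apply in the order they are defined, so the allow rule must precede the catch-all deny. Note that with the module defaults further down (whitelist_admins = ["0.0.0.0/0", "::/0"]) the allow rule matches every source and the deny rule never fires; it only takes effect once whitelist_admins is narrowed. A sketch of what the allow match evaluates to with the defaults, list truncated for brevity:

locals {
  # Illustration only: the allow ACL's ip_subnet with the default variable values.
  https_allow_example = concat(
    ["173.245.48.0/20", "103.21.244.0/22"], # var.whitelist_web (Cloudflare, truncated)
    ["0.0.0.0/0", "::/0"]                   # var.whitelist_admins (default allows all)
  )
}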
 
@@ -1,4 +1,7 @@
machine:
  certSANs:
    - ${lbv4}
    - ${apiDomain}
  kubelet:
    image: ghcr.io/siderolabs/kubelet:${version}
    extraArgs:
@@ -83,6 +86,7 @@ cluster:
        cpu: 500m
        memory: 1Gi
    certSANs:
      - ${lbv4}
      - ${apiDomain}
  controllerManager:
    image: registry.k8s.io/kube-controller-manager:${version}
@@ -114,3 +118,11 @@ cluster:
          SCW_DEFAULT_REGION: ${base64encode(region)}
          SCW_DEFAULT_ZONE: ${base64encode(zone)}
          SCW_VPC_ID: ${base64encode(vpc_id)}
  externalCloudProvider:
    enabled: true
    manifests:
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-ns.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-result.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/coredns-local.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-ns.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-result.yaml

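The ${...} placeholders in the machine-config template above (lbv4, apiDomain, version, region, zone, vpc_id) are Terraform template variables; base64encode is applied inside the template, so the raw strings are passed in. A minimal sketch of how such a template is typically rendered, where the file path and all values are assumptions, not part of this commit:

locals {
  # Hypothetical rendering of the template above; path and values are illustrative.
  controlplane_config = templatefile("${path.module}/templates/controlplane.yaml.tpl", {
    lbv4      = "51.15.0.10"      # load-balancer IPv4 (assumption)
    apiDomain = "api.example.com" # assumption
    version   = "v1.30.2"
    region    = "fr-par"
    zone      = "fr-par-2"
    vpc_id    = "11111111-2222-3333-4444-555555555555" # placeholder
  })
}
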
@@ -47,7 +47,7 @@ variable "controlplane" {
  description = "Property of controlplane"
  type        = map(any)
  default = {
    count   = 0,
    count   = 1,
    type    = "COPARM1-2C-8G" # "DEV1-L",
    type_lb = ""              # "LB-S"
  }
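
Since controlplane is a map(any) with defaults, deployments typically override it per environment; an example terraform.tfvars entry, with illustrative values drawn from the commented alternatives above:

# terraform.tfvars (illustrative)
controlplane = {
  count   = 3
  type    = "DEV1-L"
  type_lb = "LB-S"
}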
@@ -61,7 +61,7 @@ variable "instances" {
      version = "v1.30.2"
    },
    "fr-par-2" = {
      web_count    = 0,
      web_count    = 1,
      web_type     = "DEV1-L",
      worker_count = 0,
      worker_type  = "COPARM1-2C-8G",
@@ -80,8 +80,9 @@ variable "whitelist_admins" {
  default     = ["0.0.0.0/0", "::/0"]
}

# curl https://www.cloudflare.com/ips-v4 2>/dev/null | awk '{ print "\""$1"\"," }'
variable "whitelist_web" {
  description = "Whitelist for web (default Cloudflare network)"
  description = "Cloudflare subnets"
  default = [
    "173.245.48.0/20",
    "103.21.244.0/22",
@@ -94,9 +95,9 @@ variable "whitelist_web" {
    "197.234.240.0/22",
    "198.41.128.0/17",
    "162.158.0.0/15",
    "172.64.0.0/13",
    "131.0.72.0/22",
    "104.16.0.0/13",
    "104.24.0.0/14",
    "172.64.0.0/13",
    "131.0.72.0/22",
  ]
}

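The comment above shows how the Cloudflare list is regenerated with curl; a possible guard against malformed entries (not in this commit, variable name hypothetical) would be a validation block on the variable:

# Optional sketch: reject entries that are not valid CIDR prefixes.
variable "whitelist_web_validated" {
  description = "Cloudflare subnets (with format check)"
  type        = list(string)
  default     = []

  validation {
    condition     = alltrue([for c in var.whitelist_web_validated : can(cidrhost(c, 0))])
    error_message = "Each entry must be a valid CIDR, e.g. 173.245.48.0/20."
  }
}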