Upgrade Terraform plugin for Azure

Serge Logvinov
2022-08-19 12:44:08 +03:00
parent f4c427c29a
commit 7f6e13d3d5
11 changed files with 272 additions and 138 deletions
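In substance, this commit bumps the `hashicorp/azurerm` provider constraint from `~> 3.14.0` to `~> 3.19.0` in every module, upgrades the Cilium chart 1.11.5 → 1.12.1 and ingress-nginx 4.1.2 → 4.2.1, and moves the Talos image downloads from v1.2.0-alpha.0 to v1.2.0-beta.0. A hedged pre-flight sketch for the chart side (assumes the `cilium` and `ingress-nginx` Helm repos are configured under those names):

```shell
# Confirm the bumped chart versions are actually published upstream.
helm search repo cilium/cilium --versions | grep 1.12.1
helm search repo ingress-nginx/ingress-nginx --versions | grep 4.2.1
```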

View File

@@ -26,9 +26,9 @@ create-templates:
@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json
create-deployments:
helm template --namespace=kube-system --version=1.11.5 -f deployments/cilium.yaml cilium \
helm template --namespace=kube-system --version=1.12.1 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.1.2 -f deployments/ingress.yaml ingress-nginx \
helm template --namespace=ingress-nginx --version=4.2.1 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
create-network: ## Create networks
@@ -41,6 +41,8 @@ create-controlplane: ## Bootstrap controlplane
create-kubeconfig: ## Download kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} kubeconfig .
kubectl --kubeconfig=kubeconfig config set clusters.talos-k8s-azure.server https://${ENDPOINT}:6443
kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
kubectl --kubeconfig=kubeconfig get pods -owide -A
create-infrastructure: ## Bootstrap all nodes
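The `deployments/*-result.yaml` files further down are committed renderings of these two `helm template` calls, so the version bump only lands once they are regenerated. A minimal sketch, assuming the chart repos are already added locally:

```shell
# Pull fresh chart indexes, then re-render both result manifests.
helm repo update
make create-deployments
```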

View File

@@ -49,10 +49,10 @@ data:
# "cilium-metrics-config" ConfigMap
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled.
prometheus-serve-addr: ":9090"
# Port to expose Envoy metrics (e.g. "9095"). Envoy metrics listener will be disabled if this
prometheus-serve-addr: ":9962"
# Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
# field is not set.
proxy-prometheus-port: "9095"
proxy-prometheus-port: "9964"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
@@ -119,14 +119,14 @@ data:
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
cluster-id: ""
cluster-id: "0"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: vxlan
tunnel: "vxlan"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
@@ -139,20 +139,19 @@ data:
install-no-conntrack-iptables-rules: "false"
auto-direct-node-routes: "false"
enable-bandwidth-manager: "false"
enable-local-redirect-policy: "true"
enable-host-firewall: "true"
# List of devices used to attach bpf_host.o (implements BPF NodePort,
# host-firewall and BPF masquerading)
devices: "eth+"
kube-proxy-replacement: "strict"
kube-proxy-replacement: "strict"
kube-proxy-replacement-healthz-bind-address: ""
enable-host-reachable-services: "true"
bpf-lb-sock: "false"
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
enable-session-affinity: "true"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "true"
@@ -161,16 +160,32 @@ data:
enable-health-checking: "true"
enable-well-known-identities: "false"
enable-remote-node-identity: "true"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
ipam: "kubernetes"
disable-cnp-status-updates: "true"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
enable-k8s-endpoint-slice: "true"
enable-bgp-control-plane: "false"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/sys/fs/cgroup"
enable-k8s-terminating-endpoint: "true"
annotate-k8s-node: "true"
remove-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-min-ttl: "3600"
tofqdns-proxy-response-max-delay: "100ms"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -218,12 +233,8 @@ rules:
resources:
- customresourcedefinitions
verbs:
# Deprecated for removal in v1.10
- create
- list
- watch
- update
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then
# should be removed ideally.
@@ -231,21 +242,61 @@ rules:
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumbgploadbalancerippools
- ciliumbgppeeringpolicies
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumegressgatewaypolicies
- ciliumegressnatpolicies
- ciliumendpoints
- ciliumendpoints/status
- ciliumnodes
- ciliumnodes/status
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumegressnatpolicies
- ciliumendpointslices
- ciliumnetworkpolicies
- ciliumnodes
verbs:
- '*'
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
- ciliumendpoints
- ciliumnodes
verbs:
- create
- apiGroups:
- cilium.io
# To synchronize garbage collection of such resources
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
verbs:
- delete
- get
- apiGroups:
- cilium.io
resources:
- ciliumnodes
- ciliumnodes/status
verbs:
- get
- update
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
verbs:
- patch
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
@@ -288,14 +339,6 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
@@ -303,14 +346,21 @@ rules:
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
# to check apiserver connectivity
- namespaces
verbs:
- get
- list
@@ -319,26 +369,73 @@ rules:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumendpointslices
- ciliumidentities/status
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
verbs:
- '*'
# Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
- create
- update
- deletecollection
# To update the status of the CNPs and CCNPs
- patch
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
verbs:
# Update the auto-generated CNPs and CCNPs status.
- patch
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
- ciliumidentities
verbs:
# To perform garbage collection of such resources
- delete
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
# To synchronize garbage collection of such resources
- update
- apiGroups:
- cilium.io
resources:
- ciliumnodes
verbs:
- create
- update
- get
- list
- watch
# To perform CiliumNode garbage collection
- delete
- apiGroups:
- cilium.io
resources:
- ciliumnodes/status
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
verbs:
- create
- update
- get
- list
- watch
- delete
- apiGroups:
- apiextensions.k8s.io
resources:
@@ -347,8 +444,28 @@ rules:
- create
- get
- list
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumbgploadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumegressnatpolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumexternalworkloads.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
@@ -400,7 +517,7 @@ metadata:
namespace: kube-system
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9095"
prometheus.io/port: "9964"
labels:
k8s-app: cilium
spec:
@@ -410,7 +527,7 @@ spec:
k8s-app: cilium
ports:
- name: envoy-metrics
port: 9095
port: 9964
protocol: TCP
targetPort: envoy-metrics
---
@@ -433,42 +550,14 @@ spec:
template:
metadata:
annotations:
prometheus.io/port: "9090"
prometheus.io/port: "9962"
prometheus.io/scrape: "true"
# This annotation plus the CriticalAddonsOnly toleration makes
# cilium a critical pod in the cluster, which ensures cilium
# gets priority scheduling.
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: cilium
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- cilium
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.11.5@sha256:79e66c3c2677e9ecc3fd5b2ed8e4ea7e49cf99ed6ee181f2ef43400c4db5eef0"
image: "quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -478,7 +567,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -490,7 +579,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -503,7 +592,7 @@ spec:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9876
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
@@ -548,6 +637,7 @@ spec:
- "/cni-install.sh"
- "--enable-debug=false"
- "--cni-exclusive=true"
- "--log-file=/var/run/cilium/cilium-cni.log"
preStop:
exec:
command:
@@ -560,13 +650,17 @@ spec:
cpu: 100m
memory: 128Mi
ports:
- name: peer-service
containerPort: 4244
hostPort: 4244
protocol: TCP
- name: prometheus
containerPort: 9090
hostPort: 9090
containerPort: 9962
hostPort: 9962
protocol: TCP
- name: envoy-metrics
containerPort: 9095
hostPort: 9095
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
privileged: true
@@ -595,10 +689,9 @@ spec:
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
hostNetwork: true
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.11.5@sha256:79e66c3c2677e9ecc3fd5b2ed8e4ea7e49cf99ed6ee181f2ef43400c4db5eef0"
image: "quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -633,12 +726,22 @@ spec:
resources:
requests:
cpu: 100m
memory: 100Mi
memory: 100Mi # wait-for-kube-proxy
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: "cilium"
serviceAccountName: "cilium"
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
@@ -713,25 +816,15 @@ spec:
template:
metadata:
annotations:
# ensure pods roll when configmap updates
cilium.io/cilium-configmap-checksum: "10bcfd4171cc8219b04f7404f8c9add742e0de9272cd864272e80f23ec406384"
labels:
io.cilium/app: operator
name: cilium-operator
spec:
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: io.cilium/app
operator: In
values:
- operator
topologyKey: kubernetes.io/hostname
containers:
- name: cilium-operator
image: quay.io/cilium/operator-generic:v1.11.5@sha256:8ace281328b27d4216218c604d720b9a63a8aec2bd1996057c79ab0168f9d6d8
image: quay.io/cilium/operator-generic:v1.12.1@sha256:93d5aaeda37d59e6c4325ff05030d7b48fabde6576478e3fdbfb9bb4a68ec4a1
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -777,6 +870,17 @@ spec:
priorityClassName: system-cluster-critical
serviceAccount: "cilium-operator"
serviceAccountName: "cilium-operator"
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
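Threaded through the manifest above is Cilium 1.12's port reshuffle: agent Prometheus metrics move from 9090 to 9962, Envoy proxy metrics from 9095 to 9964, and the agent health endpoint from 9876 to 9879, so any external scrape config or probe has to follow. A hedged smoke test against one node after the DaemonSet rolls (the node IP is a placeholder):

```shell
# Metrics now answer on the new hostPorts: 9962 (agent), 9964 (Envoy).
curl -s http://10.0.0.4:9962/metrics | head -n 3
curl -s http://10.0.0.4:9964/metrics | head -n 3
```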

View File

@@ -5,6 +5,7 @@ k8sServicePort: "6443"
operator:
enabled: true
rollOutPods: true
replicas: 1
prometheus:
enabled: false
@@ -19,6 +20,7 @@ autoDirectNodeRoutes: false
devices: [eth+]
healthChecking: true
annotateK8sNode: true
cni:
install: true
@@ -35,8 +37,6 @@ ipv4:
enabled: true
ipv6:
enabled: true
hostServices:
enabled: true
hostPort:
enabled: true
nodePort:
@@ -46,6 +46,9 @@ externalIPs:
hostFirewall:
enabled: true
securityContext:
privileged: true
hubble:
enabled: false
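The dropped `hostServices` block is not a feature removal: in the rendered ConfigMap above the equivalent behavior now surfaces through `enable-host-reachable-services` and `bpf-lb-sock`, while `securityContext.privileged: true` keeps the 1.12 agent running with full capabilities. A hedged way to confirm the re-rendered output still carries those knobs:

```shell
# Grep the regenerated manifest for the service-handling settings.
grep -E 'kube-proxy-replacement:|enable-host-reachable-services|bpf-lb-sock' \
  deployments/cilium-result.yaml
```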

View File

@@ -4,10 +4,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -20,10 +20,10 @@ apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -66,10 +66,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
@@ -86,6 +86,13 @@ rules:
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
@@ -135,10 +142,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
@@ -156,10 +163,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -228,6 +235,21 @@ rules:
- configmaps
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
@@ -241,10 +263,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -265,10 +287,10 @@ kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -302,10 +324,10 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
@@ -336,7 +358,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "k8s.gcr.io/ingress-nginx/controller:v1.2.0@sha256:d8196e3bc1e72547c5dec66d6556c0ff92a23f6d0919b206be170bc90d5f9185"
image: "registry.k8s.io/ingress-nginx/controller:v1.3.0@sha256:d1707ca76d3b044ab8a28277a2466a02100ee9f58a86af1535a3edf9323ea1b5"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -428,10 +450,10 @@ apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.2
helm.sh/chart: ingress-nginx-4.2.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/version: "1.3.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
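Two changes here go beyond the 4.2.1/1.3.0 label bump: the new `coordination.k8s.io/leases` RBAC reflects controller 1.3's switch to Lease-based leader election (hence the `ingress-controller-leader` resourceName), and the image moves off the frozen `k8s.gcr.io` registry to `registry.k8s.io`. A hedged post-rollout check, assuming the release renders a DaemonSet named `ingress-nginx-controller`:

```shell
# Confirm the running controller image is pulled from registry.k8s.io.
kubectl -n ingress-nginx get ds ingress-nginx-controller \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```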

View File

@@ -14,10 +14,10 @@ regions = ["uksouth", "ukwest", "westeurope"]
## Init and upload images
```shell
wget https://github.com/siderolabs/talos/releases/download/v1.2.0-alpha.0/azure-amd64.tar.gz
wget https://github.com/siderolabs/talos/releases/download/v1.2.0-beta.0/azure-amd64.tar.gz
tar -xzf azure-amd64.tar.gz && mv disk.vhd disk-x64.vhd
wget https://github.com/siderolabs/talos/releases/download/v1.2.0-alpha.0/azure-arm64.tar.gz
wget https://github.com/siderolabs/talos/releases/download/v1.2.0-beta.0/azure-arm64.tar.gz
tar -xzf azure-arm64.tar.gz && mv disk.vhd disk-arm64.vhd
terraform init && terraform apply
```
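Both archives unpack to the same `disk.vhd` filename, which is why each is renamed immediately. A quick hedged sanity check before the rename (plain tar, nothing Talos-specific):

```shell
# Each release archive should contain exactly one disk image.
tar -tzf azure-amd64.tar.gz
tar -tzf azure-arm64.tar.gz
```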

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3.14.0"
version = "~> 3.19.0"
}
}
required_version = ">= 1.2"
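The pessimistic `~>` constraint pins the minor version, so `~> 3.19.0` accepts any 3.19.x patch release but never 3.20. A sketch of picking up the bump in an already-initialized module (standard Terraform CLI; the lock platforms are illustrative):

```shell
# Re-resolve providers under the new constraint and refresh the lock file.
terraform init -upgrade
terraform providers lock -platform=linux_amd64 -platform=darwin_arm64
```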

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3.14.0"
version = "~> 3.19.0"
}
}
required_version = ">= 1.2"

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3.14.0"
version = "~> 3.19.0"
}
}
required_version = ">= 1.2"

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3.14.0"
version = "~> 3.19.0"
}
}
required_version = ">= 1.2"

View File

@@ -53,7 +53,10 @@ cluster:
node-cidr-mask-size-ipv6: 112
scheduler: {}
etcd:
subnet: ${nodeSubnets[0]}
advertisedSubnets:
- ${nodeSubnets[0]}
listenSubnets:
- ${nodeSubnets[0]}
extraArgs:
election-timeout: "5000"
heartbeat-interval: "1000"
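Talos 1.2 split the old `etcd.subnet` key into `advertisedSubnets` and `listenSubnets`, which is why the template now sets both to the first node subnet. A hedged validation sketch once Terraform renders the template into a concrete machine config (the file path is illustrative):

```shell
# Validate the rendered controlplane config against cloud-platform rules.
talosctl validate --config _cfgs/controlplane.yaml --mode cloud
```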

View File

@@ -3,7 +3,7 @@ terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3.14.0"
version = "~> 3.19.0"
}
}
required_version = ">= 1.2"