update azure deployments

Serge Logvinov
2023-06-27 19:46:06 +03:00
parent 0ca3b85928
commit 840620fcf4
25 changed files with 872 additions and 192 deletions

View File

@@ -38,11 +38,14 @@ create-deployments:
helm template --namespace=kube-system -f deployments/azure-autoscaler.yaml cluster-autoscaler-azure \
autoscaler/cluster-autoscaler > deployments/azure-autoscaler-result.yaml
helm template --namespace=kube-system -f deployments/azuredisk-csi-driver.yaml azuredisk-csi-driver \
azuredisk-csi-driver/azuredisk-csi-driver > deployments/azuredisk-csi-driver-result.yaml
create-network: ## Create networks
cd prepare && terraform init && terraform apply -auto-approve
create-controlplane-bootstrap:
talosctl --talosconfig _cfgs/talosconfig config endpoint ${CPFIRST}
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} bootstrap
create-controlplane: ## Bootstrap controlplane

View File

@@ -37,7 +37,6 @@ kubectl -n kube-system create secret generic azure-csi --from-file=azure.json=_c
* [Azure CSI](https://github.com/kubernetes-sigs/azuredisk-csi-driver)
* [Azure Node AutoScaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/azure/README.md)
* [cilium](https://github.com/cilium/cilium) 1.12.5
* [kubelet-serving-cert-approver](https://github.com/alex1989hu/kubelet-serving-cert-approver)
* [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 0.5.0
* [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.19
* [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) 4.4.2

View File

@@ -7,7 +7,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
namespace: kube-system
spec:
@@ -26,7 +26,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
namespace: kube-system
automountServiceAccountToken: true
@@ -39,7 +39,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
rules:
- apiGroups:
@@ -180,7 +180,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -199,7 +199,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
namespace: kube-system
rules:
@@ -228,7 +228,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
namespace: kube-system
roleRef:
@@ -248,7 +248,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
namespace: kube-system
spec:
@@ -272,7 +272,7 @@ metadata:
app.kubernetes.io/instance: "cluster-autoscaler-azure"
app.kubernetes.io/name: "azure-cluster-autoscaler"
app.kubernetes.io/managed-by: "Helm"
helm.sh/chart: "cluster-autoscaler-9.24.0"
helm.sh/chart: "cluster-autoscaler-9.29.1"
name: cluster-autoscaler-azure
namespace: kube-system
spec:
@@ -291,7 +291,7 @@ spec:
dnsPolicy: "ClusterFirst"
containers:
- name: azure-cluster-autoscaler
image: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.1"
image: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.27.2"
imagePullPolicy: "IfNotPresent"
command:
- ./cluster-autoscaler
@@ -334,4 +334,4 @@ spec:
volumes:
- name: cloud-config
secret:
secretName: azure-cluster-autoscaler
secretName: azure-managed-identity

View File

@@ -4,7 +4,7 @@
fullnameOverride: cluster-autoscaler-azure
image:
tag: v1.26.1
tag: v1.27.2
cloudProvider: azure
cloudConfigPath: /etc/azure/azure.json
@@ -21,7 +21,8 @@ extraArgs:
extraVolumeSecrets:
cloud-config:
name: azure-cluster-autoscaler
name: azure-managed-identity
# name: azure-cloud-controller-manager
mountPath: /etc/azure
priorityClassName: system-cluster-critical

View File

@@ -201,4 +201,4 @@ spec:
volumes:
- name: cloud-config
secret:
secretName: azure-cloud-controller-manager
secretName: azure-managed-identity

View File

@@ -1,92 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: azure-cloud-node-manager
name: azure-cloud-node-manager
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: cloud-node-manager
labels:
k8s-app: cloud-node-manager
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch","list","get","update", "patch"]
- apiGroups: [""]
resources: ["nodes/status"]
verbs: ["patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cloud-node-manager
labels:
k8s-app: cloud-node-manager
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cloud-node-manager
subjects:
- kind: ServiceAccount
name: azure-cloud-node-manager
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: azure-cloud-node-manager
namespace: kube-system
labels:
component: azure-cloud-node-manager
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: azure-cloud-node-manager
template:
metadata:
labels:
k8s-app: azure-cloud-node-manager
annotations:
cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
spec:
priorityClassName: system-node-critical
serviceAccountName: azure-cloud-node-manager
hostNetwork: true # required to fetch correct hostname
nodeSelector:
node-role.kubernetes.io/control-plane: ""
node.cloudprovider.kubernetes.io/platform: azure
tolerations:
- key: "node.cloudprovider.kubernetes.io/uninitialized"
value: "true"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
containers:
- name: cloud-node-manager
image: mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.26.0
imagePullPolicy: IfNotPresent
command:
- cloud-node-manager
- --node-name=$(NODE_NAME)
- --wait-routes=false
- --v=4
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
resources:
requests:
cpu: 50m
memory: 32Mi
limits:
cpu: 100m
memory: 64Mi

View File

@@ -173,4 +173,4 @@ spec:
volumes:
- name: cloud-config
secret:
secretName: azure-cloud-controller-manager
secretName: azure-managed-identity

View File

@@ -0,0 +1,697 @@
---
# Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-azuredisk-controller-sa
namespace: kube-system
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
---
# Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-azuredisk-node-sa
namespace: kube-system
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-provisioner-role
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-attacher-role
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-snapshotter-role
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-external-resizer-role
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-controller-secret-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-node-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-provisioner-binding
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-attacher-binding
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-attacher-role
apiGroup: rbac.authorization.k8s.io
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-snapshotter-binding
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: azuredisk-csi-resizer-role
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: azuredisk-external-resizer-role
apiGroup: rbac.authorization.k8s.io
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-controller-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-azuredisk-controller-secret-role
apiGroup: rbac.authorization.k8s.io
---
# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-azuredisk-node-secret-binding
subjects:
- kind: ServiceAccount
name: csi-azuredisk-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-azuredisk-node-role
apiGroup: rbac.authorization.k8s.io
---
# Source: azuredisk-csi-driver/templates/csi-azuredisk-node.yaml
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-azuredisk-node
namespace: kube-system
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
spec:
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: csi-azuredisk-node
template:
metadata:
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
app: csi-azuredisk-node
spec:
hostNetwork: true
dnsPolicy: Default
serviceAccountName: csi-azuredisk-node-sa
nodeSelector:
kubernetes.io/os: linux
node.cloudprovider.kubernetes.io/platform: azure
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: type
operator: NotIn
values:
- virtual-kubelet
priorityClassName: system-node-critical
securityContext:
seccompProfile:
type: RuntimeDefault
tolerations:
- operator: Exists
containers:
- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: "mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29603
- --v=2
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
image: "mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0"
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=2
livenessProbe:
exec:
command:
- /csi-node-driver-registrar
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
initialDelaySeconds: 30
timeoutSeconds: 15
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azuredisk
image: "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.0"
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
- "--enable-perf-optimization=true"
- "--drivername=disk.csi.azure.com"
- "--volume-attach-limit=-1"
# - "--cloud-config-secret-name=azure-cloud-provider"
# - "--cloud-config-secret-namespace=kube-system"
- "--custom-user-agent="
- "--user-agent-suffix=OSS-helm"
- "--allow-empty-cloud-config=true"
- "--support-zone=true"
- "--get-node-info-from-labels=false"
- "--get-nodeid-from-imds=false"
ports:
- containerPort: 29603
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
env:
- name: AZURE_CREDENTIAL_FILE
value: /etc/azure/azure.json
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet/
mountPropagation: Bidirectional
name: mountpoint-dir
- name: cloud-config
mountPath: /etc/azure
readOnly: true
- mountPath: /dev
name: device-dir
- mountPath: /sys/bus/scsi/devices
name: sys-devices-dir
- mountPath: /sys/class/
name: sys-class
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/disk.csi.azure.com
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: DirectoryOrCreate
name: registration-dir
- name: cloud-config
secret:
secretName: azure-managed-identity
- hostPath:
path: /dev
type: Directory
name: device-dir
- hostPath:
path: /sys/bus/scsi/devices
type: Directory
name: sys-devices-dir
- hostPath:
path: /sys/class/
type: Directory
name: sys-class
---
# Source: azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-azuredisk-controller
namespace: kube-system
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
spec:
replicas: 1
selector:
matchLabels:
app: csi-azuredisk-controller
template:
metadata:
labels:
app.kubernetes.io/instance: "azuredisk-csi-driver"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/name: "azuredisk-csi-driver"
app.kubernetes.io/version: "v1.28.0"
helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
app: csi-azuredisk-controller
spec:
hostNetwork: true
serviceAccountName: csi-azuredisk-controller-sa
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/control-plane: ""
node.cloudprovider.kubernetes.io/platform: azure
priorityClassName: system-cluster-critical
securityContext:
seccompProfile:
type: RuntimeDefault
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
containers:
- name: csi-provisioner
image: "mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0"
args:
- "--feature-gates=Topology=true"
- "--csi-address=$(ADDRESS)"
- "--v=2"
- "--timeout=30s"
- "--leader-election"
- "--leader-election-namespace=kube-system"
- "--worker-threads=100"
- "--extra-create-metadata=true"
- "--strict-topology=true"
- "--kube-api-qps=50"
- "--kube-api-burst=100"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-attacher
image: "mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0"
args:
- "-v=2"
- "-csi-address=$(ADDRESS)"
- "-timeout=1200s"
- "-leader-election"
- "--leader-election-namespace=kube-system"
- "-worker-threads=1000"
- "-kube-api-qps=200"
- "-kube-api-burst=400"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-snapshotter
image: "mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.2.2"
args:
- "-csi-address=$(ADDRESS)"
- "-leader-election"
- "--leader-election-namespace=kube-system"
- "-v=2"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-resizer
image: "mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0"
args:
- "-csi-address=$(ADDRESS)"
- "-v=2"
- "-leader-election"
- "--leader-election-namespace=kube-system"
- '-handle-volume-inuse-error=false'
- '-feature-gates=RecoverVolumeExpansionFailure=true'
- "-timeout=240s"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
image: "mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0"
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29602
- --v=2
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azuredisk
image: "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.0"
args:
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--metrics-address=0.0.0.0:29604"
- "--disable-avset-nodes=false"
- "--vm-type=vmss"
- "--drivername=disk.csi.azure.com"
# - "--cloud-config-secret-name=azure-managed-identity"
# - "--cloud-config-secret-namespace=kube-system"
- "--custom-user-agent="
- "--user-agent-suffix=OSS-helm"
- "--allow-empty-cloud-config=true"
- "--vmss-cache-ttl-seconds=-1"
- "--enable-traffic-manager=false"
- "--traffic-manager-port=7788"
ports:
- containerPort: 29602
name: healthz
protocol: TCP
- containerPort: 29604
name: metrics
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 30
env:
- name: AZURE_CREDENTIAL_FILE
value: /etc/azure/azure.json
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /csi
name: socket-dir
- name: cloud-config
mountPath: /etc/azure
readOnly: true
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
volumes:
- name: socket-dir
emptyDir: {}
- name: cloud-config
secret:
secretName: azure-managed-identity
---
# Source: azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: disk.csi.azure.com
annotations:
csiDriver: "v1.28.0"
snapshot: "v6.2.2"
spec:
attachRequired: true
podInfoOnMount: false
fsGroupPolicy: File

View File

@@ -0,0 +1,25 @@
controller:
cloudConfigSecretName: azure-managed-identity
cloudConfigSecretNamespace: kube-system
replicas: 1
vmType: vmss
allowEmptyCloudConfig: true
nodeSelector:
node-role.kubernetes.io/control-plane: ""
node.cloudprovider.kubernetes.io/platform: azure
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
linux:
enabled: true
nodeSelector:
node.cloudprovider.kubernetes.io/platform: azure
windows:
enabled: false

View File

@@ -1,8 +1,6 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
name: csi-azure-hdd-xfs
provisioner: kubernetes.io/azure-disk
parameters:
@@ -14,17 +12,10 @@ parameters:
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
# allowedTopologies:
# - matchLabelExpressions:
# - key: topology.disk.csi.azure.com/zone
# values:
# - azure
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
name: csi-azure-ssd-xfs
provisioner: kubernetes.io/azure-disk
parameters:
@@ -36,28 +27,37 @@ parameters:
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
# allowedTopologies:
# - matchLabelExpressions:
# - key: topology.disk.csi.azure.com/zone
# values:
# - azure
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
name: csi-azure-premium-xfs
provisioner: kubernetes.io/azure-disk
parameters:
kind: Managed
cachingMode: ReadOnly
fsType: xfs
skuName: Premium_LRS # available values: Standard_LRS, Premium_LRS, StandardSSD_LRS and UltraSSD_LRS
skuName: Premium_LRS
zoned: "true"
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-azure-premium-2-xfs
provisioner: kubernetes.io/azure-disk
parameters:
fsType: xfs
kind: Managed
cachingMode: None
skuName: PremiumV2_LRS
perfProfile: Basic
# enableBursting: true
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
# allowedTopologies:
# - matchLabelExpressions:
# - key: topology.disk.csi.azure.com/zone

View File

@@ -22,6 +22,7 @@ spec:
run: overprovisioning
spec:
nodeSelector:
node.cloudprovider.kubernetes.io/platform: azure
project.io/node-pool: web
affinity:
podAntiAffinity:

View File

@@ -16,12 +16,12 @@ spec:
spec:
nodeSelector:
node.cloudprovider.kubernetes.io/platform: azure
# project.io/node-pool: worker
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsUser: 0
seccompProfile:
type: RuntimeDefault
containers:
@@ -31,11 +31,6 @@ spec:
volumeMounts:
- name: persistent-storage
mountPath: /mnt/azuredisk
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
updateStrategy:
type: RollingUpdate
selector:
@@ -49,4 +44,5 @@ spec:
resources:
requests:
storage: 10Gi
storageClassName: csi-azure-ssd-xfs
# storageClassName: csi-azure-ssd-xfs
storageClassName: csi-azure-premium-2-xfs

View File

@@ -72,7 +72,6 @@ resource "azurerm_role_definition" "csi" {
}
}
resource "azurerm_role_definition" "scaler" {
name = "kubernetes-node-autoscaler"
description = "This is a kubernetes role for node autoscaler system, created via Terraform"

View File

@@ -6,7 +6,7 @@ resource "azurerm_availability_set" "controlplane" {
resource_group_name = local.resource_group
platform_update_domain_count = 1
platform_fault_domain_count = 2
platform_fault_domain_count = 3
tags = merge(var.tags, { type = "infra" })
}
@@ -22,7 +22,7 @@ locals {
region : region
availability_set : azurerm_availability_set.controlplane[region].id
image : data.azurerm_shared_image_version.talos[startswith(lookup(try(var.controlplane[region], {}), "type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
image : data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.controlplane[region], {}), "type", ""))) > 0 ? "Arm64" : "x64"].id
type : lookup(try(var.controlplane[region], {}), "type", "Standard_B2ms")
ip : 11 + inx
@@ -35,6 +35,12 @@ locals {
lbv4s = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(".", ip)) > 1]
lbv6s = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(":", ip)) > 1]
cpv4s = flatten([for cp in azurerm_network_interface.controlplane :
[for ip in cp.ip_configuration : ip.private_ip_address if ip.private_ip_address_version == "IPv4"]
])
cpv6s = flatten([for cp in azurerm_network_interface.controlplane :
[for ip in cp.ip_configuration : ip.private_ip_address if ip.private_ip_address_version == "IPv6"]
])
}
resource "azurerm_public_ip" "controlplane_v4" {
@@ -120,7 +126,7 @@ resource "local_file" "controlplane" {
azurerm_public_ip.controlplane_v4[each.key].ip_address,
])
ipAliases = compact(each.value.network.controlplane_lb)
nodeSubnets = [cidrsubnet(each.value.network.cidr[0], 1, 0), "!${each.value.network.controlplane_lb[0]}"]
nodeSubnets = [cidrsubnet(each.value.network.cidr[0], 1, 0)]
ccm = templatefile("${path.module}/deployments/azure.json.tpl", {
subscriptionId = local.subscription_id
@@ -187,10 +193,18 @@ resource "azurerm_linux_virtual_machine" "controlplane" {
}
resource "azurerm_role_assignment" "controlplane" {
for_each = local.controlplanes
for_each = { for k in flatten([
for cp in azurerm_linux_virtual_machine.controlplane : [
for role in var.controlplane_role_definition : {
name : "role-${cp.name}-${role}"
role : role
principal : cp.identity[0].principal_id
}
]
]) : k.name => k }
scope = "/subscriptions/${local.subscription_id}"
role_definition_name = var.controlplane_role_definition
principal_id = azurerm_linux_virtual_machine.controlplane[each.key].identity[0].principal_id
role_definition_name = each.value.role
principal_id = each.value.principal
}
locals {
@@ -203,18 +217,18 @@ resource "azurerm_private_dns_a_record" "controlplane" {
resource_group_name = local.resource_group
zone_name = each.key
ttl = 300
records = local.lbv4s
records = length(local.lbv4s) > 0 ? local.lbv4s : local.cpv4s
tags = merge(var.tags, { type = "infra" })
}
resource "azurerm_private_dns_aaaa_record" "controlplane" {
for_each = toset(values({ for zone, name in local.network : zone => name.dns if name.dns != "" && length(local.lbv6s) > 0 }))
for_each = toset(values({ for zone, name in local.network : zone => name.dns if name.dns != "" && length(local.cpv6s) > 0 }))
name = split(".", var.kubernetes["apiDomain"])[0]
resource_group_name = local.resource_group
zone_name = each.key
ttl = 300
records = local.lbv6s
records = length(local.lbv6s) > 0 ? local.lbv6s : local.cpv6s
tags = merge(var.tags, { type = "infra" })
}
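
The image-selection change above (repeated for the db, web and worker scale sets below) replaces a startswith() check with a regexall() test, so any Arm-based D*p/E*p SKU picks the Arm64 Talos image. A minimal sketch of how that expression classifies SKUs — the SKU names here are illustrative, not taken from this repository:

locals {
  example_skus = ["Standard_D2ps_v5", "Standard_E4ps_v5", "Standard_B2ms"]

  # regexall() returns a list of matches; a non-empty list means the SKU is an
  # Arm-based D*p/E*p instance type, so the Arm64 image is selected.
  example_arch = { for sku in local.example_skus :
    sku => length(regexall("^Standard_[DE][\\d+]p", sku)) > 0 ? "Arm64" : "x64"
  }
  # example_arch = {
  #   "Standard_D2ps_v5" = "Arm64"
  #   "Standard_E4ps_v5" = "Arm64"
  #   "Standard_B2ms"    = "x64"
  # }
}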

View File

@@ -7,14 +7,18 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" {
for_each = { for idx, name in local.regions : name => idx }
location = each.key
instances = lookup(try(var.instances[each.key], {}), "db_count", 0)
name = "db-${lower(each.key)}"
computer_name_prefix = "db-${lower(each.key)}-"
resource_group_name = local.resource_group
sku = lookup(try(var.instances[each.key], {}), "db_type", "Standard_B2s")
provision_vm_agent = false
overprovision = false
platform_fault_domain_count = 2
instances = lookup(try(var.instances[each.key], {}), "db_count", 0)
name = "db-${lower(each.key)}"
computer_name_prefix = "db-${lower(each.key)}-"
resource_group_name = local.resource_group
sku = lookup(try(var.instances[each.key], {}), "db_type", "Standard_B2s")
provision_vm_agent = false
overprovision = false
platform_fault_domain_count = 5
proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id
# zone_balance = true
# zones = ["0", "1", "2"]
network_interface {
name = "db-${lower(each.key)}"
@@ -44,7 +48,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" {
custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.network_controlplane[each.key].controlplane_lb[0]
lbv4 = try(local.network_controlplane[each.key].controlplane_lb[0], "")
labels = local.db_labels
nodeSubnets = [local.network_public[each.key].cidr[0]]
})
@@ -62,7 +66,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" {
disk_size_gb = 50
}
source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "db_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
source_image_id = data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.instances[each.key], {}), "db_type", ""))) > 0 ? "Arm64" : "x64"].id
# source_image_reference {
# publisher = "talos"
# offer = "Talos"

View File

@@ -14,9 +14,12 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
sku = lookup(try(var.instances[each.key], {}), "web_type", "Standard_B2s")
provision_vm_agent = false
overprovision = false
platform_fault_domain_count = 2
platform_fault_domain_count = 5
proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id
# zone_balance = false
# zones = ["1"]
# health_probe_id = local.network_public[each.key].sku != "Basic" ? azurerm_lb_probe.web[each.key].id : null
# automatic_instance_repair {
# enabled = local.network_public[each.key].sku != "Basic"
@@ -51,7 +54,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.network_controlplane[each.key].controlplane_lb[0]
lbv4 = try(local.network_controlplane[each.key].controlplane_lb[0], "")
labels = local.web_labels
nodeSubnets = [local.network_public[each.key].cidr[0]]
})
@@ -69,7 +72,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
disk_size_gb = 50
}
source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "worker_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
source_image_id = data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.instances[each.key], {}), "web_type", ""))) > 0 ? "Arm64" : "x64"].id
# source_image_reference {
# publisher = "talos"
# offer = "Talos"

View File

@@ -14,9 +14,29 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" {
sku = lookup(try(var.instances[each.key], {}), "worker_type", "Standard_B2s")
provision_vm_agent = false
overprovision = false
platform_fault_domain_count = 2
platform_fault_domain_count = 5
proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id
# zone_balance = false
# zones = ["1"]
# extension_operations_enabled = true
# extension {
# name = "KubeletHealth"
# publisher = "Microsoft.ManagedServices"
# type = "ApplicationHealthLinux"
# type_handler_version = "1.0"
# auto_upgrade_minor_version = false
# settings = jsonencode({
# protocol : "http"
# port : "10248"
# requestPath : "/healthz"
# intervalInSeconds : 60
# numberOfProbes : 3
# })
# }
network_interface {
name = "worker-${lower(each.key)}"
primary = true
@@ -46,7 +66,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" {
custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.network_controlplane[each.key].controlplane_lb[0]
lbv4 = try(local.network_controlplane[each.key].controlplane_lb[0], "")
labels = local.worker_labels
nodeSubnets = [local.network_private[each.key].cidr[0]]
})
@@ -72,7 +92,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" {
}
}
source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "worker_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
source_image_id = data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.instances[each.key], {}), "worker_type", ""))) > 0 ? "Arm64" : "x64"].id
# source_image_reference {
# publisher = "talos"
# offer = "Talos"

View File

@@ -79,8 +79,8 @@ resource "azurerm_linux_virtual_machine" "router" {
source_image_reference {
publisher = "Debian"
offer = "debian-11"
sku = "11-gen2"
offer = "debian-12"
sku = "12-gen2"
version = "latest"
}

View File

@@ -1,6 +1,6 @@
resource "azurerm_lb" "controlplane" {
for_each = { for idx, name in var.regions : name => idx }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
location = each.key
name = "controlplane-${each.key}"
resource_group_name = var.resource_group
@@ -22,7 +22,7 @@ resource "azurerm_lb" "controlplane" {
}
resource "azurerm_lb_probe" "controlplane" {
for_each = { for idx, name in var.regions : name => idx }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
name = "controlplane-tcp-probe"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
interval_in_seconds = 30
@@ -31,19 +31,19 @@ resource "azurerm_lb_probe" "controlplane" {
}
resource "azurerm_lb_backend_address_pool" "controlplane_v4" {
for_each = { for idx, name in var.regions : name => idx }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
loadbalancer_id = azurerm_lb.controlplane[each.key].id
name = "controlplane-pool-v4"
}
resource "azurerm_lb_backend_address_pool" "controlplane_v6" {
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) && try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
loadbalancer_id = azurerm_lb.controlplane[each.key].id
name = "controlplane-pool-v6"
}
resource "azurerm_lb_rule" "kubernetes_v4" {
for_each = { for idx, name in var.regions : name => idx }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
name = "controlplane-v4"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
frontend_ip_configuration_name = "controlplane-lb-v4"
@@ -57,7 +57,7 @@ resource "azurerm_lb_rule" "kubernetes_v4" {
}
resource "azurerm_lb_rule" "kubernetes_v6" {
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) && try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
name = "controlplane-v6"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
frontend_ip_configuration_name = "controlplane-lb-v6"
@@ -71,7 +71,7 @@ resource "azurerm_lb_rule" "kubernetes_v6" {
}
resource "azurerm_lb_rule" "talos" {
for_each = { for idx, name in var.regions : name => idx }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
name = "controlplane-talos-v4"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
frontend_ip_configuration_name = "controlplane-lb-v4"
@@ -85,7 +85,7 @@ resource "azurerm_lb_rule" "talos" {
}
resource "azurerm_lb_rule" "talos_v6" {
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) && try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
name = "controlplane-talos-v6"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
frontend_ip_configuration_name = "controlplane-lb-v6"

View File

@@ -79,6 +79,8 @@ resource "azurerm_virtual_network_peering" "peering" {
allow_virtual_network_access = true
allow_forwarded_traffic = true
allow_gateway_transit = false
depends_on = [azurerm_virtual_network.main]
}
resource "azurerm_route_table" "main" {

View File

@@ -16,50 +16,50 @@ output "resource_group" {
output "network" {
description = "The network"
value = { for zone, net in azurerm_virtual_network.main : zone => {
value = { for region, net in azurerm_virtual_network.main : region => {
name = net.name
nat = try(azurerm_public_ip.nat[zone].ip_address, "")
nat = try(azurerm_public_ip.nat[region].ip_address, "")
dns = try(azurerm_private_dns_zone.main[0].name, "")
peering = try(azurerm_linux_virtual_machine.router[zone].private_ip_addresses, [])
peering = try(azurerm_linux_virtual_machine.router[region].private_ip_addresses, [])
} }
}
output "network_controlplane" {
description = "The controlplane network"
value = { for zone, subnet in azurerm_subnet.controlplane : zone => {
value = { for region, subnet in azurerm_subnet.controlplane : region => {
network_id = subnet.id
cidr = subnet.address_prefixes
sku = azurerm_lb.controlplane[zone].sku
controlplane_pool_v4 = try(azurerm_lb_backend_address_pool.controlplane_v4[zone].id, "")
controlplane_pool_v6 = try(azurerm_lb_backend_address_pool.controlplane_v6[zone].id, "")
controlplane_lb = azurerm_lb.controlplane[zone].private_ip_addresses
sku = try(var.capabilities[region].network_lb_sku, "Basic")
controlplane_pool_v4 = try(var.capabilities[region].network_lb_enable, false) ? try(azurerm_lb_backend_address_pool.controlplane_v4[region].id, "") : ""
controlplane_pool_v6 = try(var.capabilities[region].network_lb_enable, false) ? try(azurerm_lb_backend_address_pool.controlplane_v6[region].id, "") : ""
controlplane_lb = try(var.capabilities[region].network_lb_enable, false) ? azurerm_lb.controlplane[region].private_ip_addresses : []
} }
}
output "network_public" {
description = "The public network"
value = { for zone, subnet in azurerm_subnet.public : zone => {
value = { for region, subnet in azurerm_subnet.public : region => {
network_id = subnet.id
cidr = subnet.address_prefixes
sku = var.capabilities[zone].network_gw_sku
sku = var.capabilities[region].network_gw_sku
} }
}
output "network_private" {
description = "The private network"
value = { for zone, subnet in azurerm_subnet.private : zone => {
value = { for region, subnet in azurerm_subnet.private : region => {
network_id = subnet.id
cidr = subnet.address_prefixes
nat = try(azurerm_public_ip.nat[zone].ip_address, "")
sku = try(azurerm_public_ip.nat[zone].ip_address, "") == "" ? "Standard" : var.capabilities[zone].network_gw_sku
nat = try(azurerm_public_ip.nat[region].ip_address, "")
sku = try(azurerm_public_ip.nat[region].ip_address, "") == "" ? "Standard" : var.capabilities[region].network_gw_sku
} }
}
output "secgroups" {
description = "List of secgroups"
value = { for zone, subnet in azurerm_subnet.private : zone => {
common = azurerm_network_security_group.common[zone].id
controlplane = azurerm_network_security_group.controlplane[zone].id
web = azurerm_network_security_group.web[zone].id
value = { for region, subnet in azurerm_subnet.private : region => {
common = azurerm_network_security_group.common[region].id
controlplane = azurerm_network_security_group.controlplane[region].id
web = azurerm_network_security_group.web[region].id
} }
}

View File

@@ -72,6 +72,7 @@ variable "capabilities" {
},
"uksouth" = {
network_nat_enable = false,
network_lb_enable = false
network_lb_sku = "Basic", # Standard
network_gw_enable = false,
network_gw_type = "Standard_B1s",
@@ -79,6 +80,7 @@ variable "capabilities" {
},
"ukwest" = {
network_nat_enable = false,
network_lb_enable = false
network_lb_sku = "Basic",
network_gw_enable = false,
network_gw_type = "Standard_B1s",
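
The new network_lb_enable flag gates every load-balancer resource in the hunks above. A hypothetical region entry with the control-plane LB turned on might look like the sketch below (region name and values are illustrative, only keys already used in this variable are shown):

"westeurope" = {
  network_nat_enable = false,
  network_lb_enable  = true,       # creates azurerm_lb.controlplane for this region
  network_lb_sku     = "Standard", # non-Basic SKU also enables the IPv6 pool and rules
  network_gw_enable  = false,
  network_gw_type    = "Standard_B1s",
}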

View File

@@ -19,8 +19,10 @@ machine:
routes:
- network: ::/0
gateway: fe80::1234:5678:9abc
%{if length(ipAliases) > 0 }
- interface: lo
addresses: ${format("%#v",ipAliases)}
%{endif}
- interface: dummy0
addresses:
- 169.254.2.53/32
@@ -85,13 +87,13 @@ cluster:
election-timeout: "5000"
heartbeat-interval: "1000"
inlineManifests:
- name: azure-cloud-controller-config
- name: azure-managed-identity
contents: |-
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: azure-cloud-controller-manager
name: azure-managed-identity
namespace: kube-system
data:
azure.json: ${base64encode(ccm)}
@@ -100,9 +102,8 @@ cluster:
manifests:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/talos-cloud-controller-manager-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-cloud-controller-manager.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-csi-node.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-csi.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-storage.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azuredisk-csi-driver-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azuredisk-storage.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/metrics-server-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-result.yaml

View File

@@ -28,10 +28,15 @@ machine:
- interface: dummy0
addresses:
- 169.254.2.53/32
%{if lbv4 != "" }
extraHostEntries:
- ip: ${lbv4}
aliases:
- ${apiDomain}
%{endif}
time:
servers:
- time.cloudflare.com
install:
wipe: false
sysctls:
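
This %{if} guard pairs with the try() fallback added to the db/web/worker scale sets above: when a region has no control-plane load balancer, lbv4 renders as an empty string and the extraHostEntries block is dropped. A minimal sketch (region name illustrative):

locals {
  # Falls back to "" instead of failing when controlplane_lb is an empty list,
  # which is now the case for regions with network_lb_enable = false.
  example_lbv4 = try(local.network_controlplane["ukwest"].controlplane_lb[0], "")
  # example_lbv4 == "" -> the %{ if lbv4 != "" } section above is omitted from
  # the rendered worker config, so no static apiDomain host entry is written.
}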

View File

@@ -1,6 +1,6 @@
variable "controlplane_role_definition" {
default = "kubernetes-ccm"
default = ["kubernetes-ccm", "kubernetes-csi", "kubernetes-node-autoscaler"]
}
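
With the default now a list, each control-plane VM receives one azurerm_role_assignment per role through the flatten()-based for_each in the control-plane role assignment hunk above. A rough sketch of how that cross product expands (VM names hypothetical, role names from this variable):

locals {
  example_vms   = ["controlplane-uksouth-1", "controlplane-ukwest-1"]
  example_roles = ["kubernetes-ccm", "kubernetes-csi", "kubernetes-node-autoscaler"]

  # One map entry per VM x role pair; each map key becomes the for_each key of
  # the corresponding azurerm_role_assignment instance.
  example_assignments = { for a in flatten([
    for vm in local.example_vms : [
      for role in local.example_roles : { name = "role-${vm}-${role}", vm = vm, role = role }
    ]
  ]) : a.name => a }
  # -> 6 entries, e.g. "role-controlplane-uksouth-1-kubernetes-ccm"
}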
variable "gallery_name" {