Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-11-01 02:38:31 +00:00)

Commit: update azure deployments

@@ -38,11 +38,14 @@ create-deployments:
 	helm template --namespace=kube-system -f deployments/azure-autoscaler.yaml cluster-autoscaler-azure \
 		autoscaler/cluster-autoscaler > deployments/azure-autoscaler-result.yaml
 
+	helm template --namespace=kube-system -f deployments/azuredisk-csi-driver.yaml azuredisk-csi-driver \
+		azuredisk-csi-driver/azuredisk-csi-driver > deployments/azuredisk-csi-driver-result.yaml
+
 create-network: ## Create networks
 	cd prepare && terraform init && terraform apply -auto-approve
 
 create-controlplane-bootstrap:
-	talosctl --talosconfig _cfgs/talosconfig config endpoint ${CPFIRST}
+	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
 	talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} bootstrap
 
 create-controlplane: ## Bootstrap controlplane
@@ -37,7 +37,6 @@ kubectl -n kube-system create secret generic azure-csi --from-file=azure.json=_c
 * [Azure CSI](https://github.com/kubernetes-sigs/azuredisk-csi-driver)
 * [Azure Node AutoScaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/azure/README.md)
 * [cilium](https://github.com/cilium/cilium) 1.12.5
-* [kubelet-serving-cert-approver](https://github.com/alex1989hu/kubelet-serving-cert-approver)
 * [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 0.5.0
 * [rancher.io/local-path](https://github.com/rancher/local-path-provisioner) 0.0.19
 * [ingress-nginx](https://kubernetes.github.io/ingress-nginx/) 4.4.2
@@ -7,7 +7,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:
@@ -26,7 +26,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
   namespace: kube-system
 automountServiceAccountToken: true
@@ -39,7 +39,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
 rules:
   - apiGroups:
@@ -180,7 +180,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -199,7 +199,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
   namespace: kube-system
 rules:
@@ -228,7 +228,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
   namespace: kube-system
 roleRef:
@@ -248,7 +248,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:
@@ -272,7 +272,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.24.0"
+    helm.sh/chart: "cluster-autoscaler-9.29.1"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:
@@ -291,7 +291,7 @@ spec:
       dnsPolicy: "ClusterFirst"
       containers:
         - name: azure-cluster-autoscaler
-          image: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.1"
+          image: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.27.2"
          imagePullPolicy: "IfNotPresent"
          command:
            - ./cluster-autoscaler
@@ -334,4 +334,4 @@ spec:
       volumes:
         - name: cloud-config
           secret:
-            secretName: azure-cluster-autoscaler
+            secretName: azure-managed-identity
@@ -4,7 +4,7 @@
 
 fullnameOverride: cluster-autoscaler-azure
 image:
-  tag: v1.26.1
+  tag: v1.27.2
 
 cloudProvider: azure
 cloudConfigPath: /etc/azure/azure.json
@@ -21,7 +21,8 @@ extraArgs:
 
 extraVolumeSecrets:
   cloud-config:
-    name: azure-cluster-autoscaler
+    name: azure-managed-identity
+    # name: azure-cloud-controller-manager
     mountPath: /etc/azure
 
 priorityClassName: system-cluster-critical
@@ -201,4 +201,4 @@ spec:
       volumes:
         - name: cloud-config
           secret:
-            secretName: azure-cloud-controller-manager
+            secretName: azure-managed-identity
@@ -1,92 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  labels:
-    k8s-app: azure-cloud-node-manager
-  name: azure-cloud-node-manager
-  namespace: kube-system
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: cloud-node-manager
-  labels:
-    k8s-app: cloud-node-manager
-rules:
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["watch","list","get","update", "patch"]
-  - apiGroups: [""]
-    resources: ["nodes/status"]
-    verbs: ["patch"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: cloud-node-manager
-  labels:
-    k8s-app: cloud-node-manager
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: cloud-node-manager
-subjects:
-  - kind: ServiceAccount
-    name: azure-cloud-node-manager
-    namespace: kube-system
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: azure-cloud-node-manager
-  namespace: kube-system
-  labels:
-    component: azure-cloud-node-manager
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-spec:
-  selector:
-    matchLabels:
-      k8s-app: azure-cloud-node-manager
-  template:
-    metadata:
-      labels:
-        k8s-app: azure-cloud-node-manager
-      annotations:
-        cluster-autoscaler.kubernetes.io/daemonset-pod: "true"
-    spec:
-      priorityClassName: system-node-critical
-      serviceAccountName: azure-cloud-node-manager
-      hostNetwork: true # required to fetch correct hostname
-      nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
-        node.cloudprovider.kubernetes.io/platform: azure
-      tolerations:
-        - key: "node.cloudprovider.kubernetes.io/uninitialized"
-          value: "true"
-          effect: "NoSchedule"
-        - key: "node-role.kubernetes.io/control-plane"
-          effect: NoSchedule
-      containers:
-        - name: cloud-node-manager
-          image: mcr.microsoft.com/oss/kubernetes/azure-cloud-node-manager:v1.26.0
-          imagePullPolicy: IfNotPresent
-          command:
-            - cloud-node-manager
-            - --node-name=$(NODE_NAME)
-            - --wait-routes=false
-            - --v=4
-          env:
-            - name: NODE_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.nodeName
-          resources:
-            requests:
-              cpu: 50m
-              memory: 32Mi
-            limits:
-              cpu: 100m
-              memory: 64Mi
@@ -173,4 +173,4 @@ spec:
       volumes:
         - name: cloud-config
           secret:
-            secretName: azure-cloud-controller-manager
+            secretName: azure-managed-identity

azure/deployments/azuredisk-csi-driver-result.yaml (new file, 697 lines)
@@ -0,0 +1,697 @@
+---
+# Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-controller.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: csi-azuredisk-controller-sa
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+---
+# Source: azuredisk-csi-driver/templates/serviceaccount-csi-azuredisk-node.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: csi-azuredisk-node-sa
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-external-provisioner-role
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["get", "list", "watch", "create", "update", "patch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["csinodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["get", "list"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-external-attacher-role
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["csi.storage.k8s.io"]
+    resources: ["csinodeinfos"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments/status"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-external-snapshotter-role
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+rules:
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents/status"]
+    verbs: ["update", "patch"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-external-resizer-role
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+rules:
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims/status"]
+    verbs: ["update", "patch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "list", "watch"]
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-azuredisk-controller-secret-role
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get"]
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-azuredisk-node-role
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get"]
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-csi-provisioner-binding
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: azuredisk-external-provisioner-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-csi-attacher-binding
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: azuredisk-external-attacher-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-csi-snapshotter-binding
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: azuredisk-external-snapshotter-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: azuredisk-csi-resizer-role
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: azuredisk-external-resizer-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-controller.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-azuredisk-controller-secret-binding
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-controller-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: csi-azuredisk-controller-secret-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# Source: azuredisk-csi-driver/templates/rbac-csi-azuredisk-node.yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-azuredisk-node-secret-binding
+subjects:
+  - kind: ServiceAccount
+    name: csi-azuredisk-node-sa
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: csi-azuredisk-node-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# Source: azuredisk-csi-driver/templates/csi-azuredisk-node.yaml
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: csi-azuredisk-node
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+spec:
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate
+  selector:
+    matchLabels:
+      app: csi-azuredisk-node
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/instance: "azuredisk-csi-driver"
+        app.kubernetes.io/managed-by: "Helm"
+        app.kubernetes.io/name: "azuredisk-csi-driver"
+        app.kubernetes.io/version: "v1.28.0"
+        helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+        app: csi-azuredisk-node
+    spec:
+      hostNetwork: true
+      dnsPolicy: Default
+      serviceAccountName: csi-azuredisk-node-sa
+      nodeSelector:
+        kubernetes.io/os: linux
+        node.cloudprovider.kubernetes.io/platform: azure
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: type
+                    operator: NotIn
+                    values:
+                      - virtual-kubelet
+      priorityClassName: system-node-critical
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+      tolerations:
+        - operator: Exists
+      containers:
+        - name: liveness-probe
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+          image: "mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0"
+          args:
+            - --csi-address=/csi/csi.sock
+            - --probe-timeout=3s
+            - --health-port=29603
+            - --v=2
+          resources:
+            limits:
+              memory: 100Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+        - name: node-driver-registrar
+          image: "mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0"
+          args:
+            - --csi-address=$(ADDRESS)
+            - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+            - --v=2
+          livenessProbe:
+            exec:
+              command:
+                - /csi-node-driver-registrar
+                - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+                - --mode=kubelet-registration-probe
+            initialDelaySeconds: 30
+            timeoutSeconds: 15
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+            - name: DRIVER_REG_SOCK_PATH
+              value: /var/lib/kubelet/plugins/disk.csi.azure.com/csi.sock
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+            - name: registration-dir
+              mountPath: /registration
+          resources:
+            limits:
+              memory: 100Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+        - name: azuredisk
+          image: "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.0"
+          args:
+            - "--v=5"
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--nodeid=$(KUBE_NODE_NAME)"
+            - "--enable-perf-optimization=true"
+            - "--drivername=disk.csi.azure.com"
+            - "--volume-attach-limit=-1"
+            # - "--cloud-config-secret-name=azure-cloud-provider"
+            # - "--cloud-config-secret-namespace=kube-system"
+            - "--custom-user-agent="
+            - "--user-agent-suffix=OSS-helm"
+            - "--allow-empty-cloud-config=true"
+            - "--support-zone=true"
+            - "--get-node-info-from-labels=false"
+            - "--get-nodeid-from-imds=false"
+          ports:
+            - containerPort: 29603
+              name: healthz
+              protocol: TCP
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 30
+            timeoutSeconds: 10
+            periodSeconds: 30
+          env:
+            - name: AZURE_CREDENTIAL_FILE
+              value: /etc/azure/azure.json
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: spec.nodeName
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+            - mountPath: /var/lib/kubelet/
+              mountPropagation: Bidirectional
+              name: mountpoint-dir
+            - name: cloud-config
+              mountPath: /etc/azure
+              readOnly: true
+            - mountPath: /dev
+              name: device-dir
+            - mountPath: /sys/bus/scsi/devices
+              name: sys-devices-dir
+            - mountPath: /sys/class/
+              name: sys-class
+          resources:
+            limits:
+              memory: 200Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+      volumes:
+        - hostPath:
+            path: /var/lib/kubelet/plugins/disk.csi.azure.com
+            type: DirectoryOrCreate
+          name: socket-dir
+        - hostPath:
+            path: /var/lib/kubelet/
+            type: DirectoryOrCreate
+          name: mountpoint-dir
+        - hostPath:
+            path: /var/lib/kubelet/plugins_registry/
+            type: DirectoryOrCreate
+          name: registration-dir
+        - name: cloud-config
+          secret:
+            secretName: azure-managed-identity
+        - hostPath:
+            path: /dev
+            type: Directory
+          name: device-dir
+        - hostPath:
+            path: /sys/bus/scsi/devices
+            type: Directory
+          name: sys-devices-dir
+        - hostPath:
+            path: /sys/class/
+            type: Directory
+          name: sys-class
+---
+# Source: azuredisk-csi-driver/templates/csi-azuredisk-controller.yaml
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: csi-azuredisk-controller
+  namespace: kube-system
+  labels:
+    app.kubernetes.io/instance: "azuredisk-csi-driver"
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/name: "azuredisk-csi-driver"
+    app.kubernetes.io/version: "v1.28.0"
+    helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: csi-azuredisk-controller
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/instance: "azuredisk-csi-driver"
+        app.kubernetes.io/managed-by: "Helm"
+        app.kubernetes.io/name: "azuredisk-csi-driver"
+        app.kubernetes.io/version: "v1.28.0"
+        helm.sh/chart: "azuredisk-csi-driver-v1.28.0"
+        app: csi-azuredisk-controller
+    spec:
+      hostNetwork: true
+      serviceAccountName: csi-azuredisk-controller-sa
+      nodeSelector:
+        kubernetes.io/os: linux
+        node-role.kubernetes.io/control-plane: ""
+        node.cloudprovider.kubernetes.io/platform: azure
+      priorityClassName: system-cluster-critical
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+      containers:
+        - name: csi-provisioner
+          image: "mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0"
+          args:
+            - "--feature-gates=Topology=true"
+            - "--csi-address=$(ADDRESS)"
+            - "--v=2"
+            - "--timeout=30s"
+            - "--leader-election"
+            - "--leader-election-namespace=kube-system"
+            - "--worker-threads=100"
+            - "--extra-create-metadata=true"
+            - "--strict-topology=true"
+            - "--kube-api-qps=50"
+            - "--kube-api-burst=100"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+          resources:
+            limits:
+              memory: 500Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+        - name: csi-attacher
+          image: "mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0"
+          args:
+            - "-v=2"
+            - "-csi-address=$(ADDRESS)"
+            - "-timeout=1200s"
+            - "-leader-election"
+            - "--leader-election-namespace=kube-system"
+            - "-worker-threads=1000"
+            - "-kube-api-qps=200"
+            - "-kube-api-burst=400"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+          resources:
+            limits:
+              memory: 500Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+        - name: csi-snapshotter
+          image: "mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.2.2"
+          args:
+            - "-csi-address=$(ADDRESS)"
+            - "-leader-election"
+            - "--leader-election-namespace=kube-system"
+            - "-v=2"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+          resources:
+            limits:
+              memory: 200Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+        - name: csi-resizer
+          image: "mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0"
+          args:
+            - "-csi-address=$(ADDRESS)"
+            - "-v=2"
+            - "-leader-election"
+            - "--leader-election-namespace=kube-system"
+            - '-handle-volume-inuse-error=false'
+            - '-feature-gates=RecoverVolumeExpansionFailure=true'
+            - "-timeout=240s"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+          resources:
+            limits:
+              memory: 500Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+        - name: liveness-probe
+          image: "mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0"
+          args:
+            - --csi-address=/csi/csi.sock
+            - --probe-timeout=3s
+            - --health-port=29602
+            - --v=2
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+          resources:
+            limits:
+              memory: 100Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+        - name: azuredisk
+          image: "mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.28.0"
+          args:
+            - "--v=5"
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--metrics-address=0.0.0.0:29604"
+            - "--disable-avset-nodes=false"
+            - "--vm-type=vmss"
+            - "--drivername=disk.csi.azure.com"
+            # - "--cloud-config-secret-name=azure-managed-identity"
+            # - "--cloud-config-secret-namespace=kube-system"
+            - "--custom-user-agent="
+            - "--user-agent-suffix=OSS-helm"
+            - "--allow-empty-cloud-config=true"
+            - "--vmss-cache-ttl-seconds=-1"
+            - "--enable-traffic-manager=false"
+            - "--traffic-manager-port=7788"
+          ports:
+            - containerPort: 29602
+              name: healthz
+              protocol: TCP
+            - containerPort: 29604
+              name: metrics
+              protocol: TCP
+          livenessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /healthz
+              port: healthz
+            initialDelaySeconds: 30
+            timeoutSeconds: 10
+            periodSeconds: 30
+          env:
+            - name: AZURE_CREDENTIAL_FILE
+              value: /etc/azure/azure.json
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+            - mountPath: /csi
+              name: socket-dir
+            - name: cloud-config
+              mountPath: /etc/azure
+              readOnly: true
+          resources:
+            limits:
+              memory: 500Mi
+            requests:
+              cpu: 10m
+              memory: 20Mi
+      volumes:
+        - name: socket-dir
+          emptyDir: {}
+        - name: cloud-config
+          secret:
+            secretName: azure-managed-identity
+---
+# Source: azuredisk-csi-driver/templates/csi-azuredisk-driver.yaml
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+  name: disk.csi.azure.com
+  annotations:
+    csiDriver: "v1.28.0"
+    snapshot: "v6.2.2"
+spec:
+  attachRequired: true
+  podInfoOnMount: false
+  fsGroupPolicy: File

azure/deployments/azuredisk-csi-driver.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
+
+controller:
+  cloudConfigSecretName: azure-managed-identity
+  cloudConfigSecretNamespace: kube-system
+
+  replicas: 1
+  vmType: vmss
+  allowEmptyCloudConfig: true
+
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+    node.cloudprovider.kubernetes.io/platform: azure
+
+  tolerations:
+    - key: node-role.kubernetes.io/control-plane
+      effect: NoSchedule
+
+linux:
+  enabled: true
+
+  nodeSelector:
+    node.cloudprovider.kubernetes.io/platform: azure
+
+windows:
+  enabled: false
@@ -1,8 +1,6 @@
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "false"
   name: csi-azure-hdd-xfs
 provisioner: kubernetes.io/azure-disk
 parameters:
@@ -14,17 +12,10 @@ parameters:
 reclaimPolicy: Delete
 volumeBindingMode: WaitForFirstConsumer
 allowVolumeExpansion: true
-# allowedTopologies:
-# - matchLabelExpressions:
-#   - key: topology.disk.csi.azure.com/zone
-#     values:
-#       - azure
 ---
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "false"
   name: csi-azure-ssd-xfs
 provisioner: kubernetes.io/azure-disk
 parameters:
@@ -36,28 +27,37 @@ parameters:
 reclaimPolicy: Delete
 volumeBindingMode: WaitForFirstConsumer
 allowVolumeExpansion: true
-# allowedTopologies:
-# - matchLabelExpressions:
-#   - key: topology.disk.csi.azure.com/zone
-#     values:
-#       - azure
 ---
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "false"
   name: csi-azure-premium-xfs
 provisioner: kubernetes.io/azure-disk
 parameters:
   kind: Managed
   cachingMode: ReadOnly
   fsType: xfs
-  skuName: Premium_LRS # available values: Standard_LRS, Premium_LRS, StandardSSD_LRS and UltraSSD_LRS
+  skuName: Premium_LRS
   zoned: "true"
 reclaimPolicy: Delete
 volumeBindingMode: WaitForFirstConsumer
 allowVolumeExpansion: true
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: csi-azure-premium-2-xfs
+provisioner: kubernetes.io/azure-disk
+parameters:
+  fsType: xfs
+  kind: Managed
+  cachingMode: None
+  skuName: PremiumV2_LRS
+  perfProfile: Basic
+  # enableBursting: true
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+allowVolumeExpansion: true
 # allowedTopologies:
 # - matchLabelExpressions:
 #   - key: topology.disk.csi.azure.com/zone
@@ -22,6 +22,7 @@ spec:
         run: overprovisioning
     spec:
       nodeSelector:
+        node.cloudprovider.kubernetes.io/platform: azure
         project.io/node-pool: web
       affinity:
         podAntiAffinity:
@@ -16,12 +16,12 @@ spec:
     spec:
       nodeSelector:
         node.cloudprovider.kubernetes.io/platform: azure
+        # project.io/node-pool: worker
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
      securityContext:
-        runAsNonRoot: true
-        runAsUser: 1000
+        runAsUser: 0
        seccompProfile:
          type: RuntimeDefault
      containers:
@@ -31,11 +31,6 @@ spec:
          volumeMounts:
            - name: persistent-storage
              mountPath: /mnt/azuredisk
-          securityContext:
-            allowPrivilegeEscalation: false
-            capabilities:
-              drop:
-                - ALL
  updateStrategy:
    type: RollingUpdate
  selector:
@@ -49,4 +44,5 @@ spec:
        resources:
          requests:
            storage: 10Gi
-      storageClassName: csi-azure-ssd-xfs
+      # storageClassName: csi-azure-ssd-xfs
+      storageClassName: csi-azure-premium-2-xfs
@@ -72,7 +72,6 @@ resource "azurerm_role_definition" "csi" {
   }
 }
 
-
 resource "azurerm_role_definition" "scaler" {
   name        = "kubernetes-node-autoscaler"
   description = "This is a kubernetes role for node autoscaler system, created via Terraform"
@@ -6,7 +6,7 @@ resource "azurerm_availability_set" "controlplane" {
   resource_group_name = local.resource_group
 
   platform_update_domain_count = 1
-  platform_fault_domain_count  = 2
+  platform_fault_domain_count  = 3
 
   tags = merge(var.tags, { type = "infra" })
 }
@@ -22,7 +22,7 @@ locals {
       region : region
       availability_set : azurerm_availability_set.controlplane[region].id
 
-      image : data.azurerm_shared_image_version.talos[startswith(lookup(try(var.controlplane[region], {}), "type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
+      image : data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.controlplane[region], {}), "db_type", ""))) > 0 ? "Arm64" : "x64"].id
       type : lookup(try(var.controlplane[region], {}), "type", "Standard_B2ms")
 
       ip : 11 + inx
@@ -35,6 +35,12 @@ locals {
 
   lbv4s = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(".", ip)) > 1]
   lbv6s = [for ip in flatten([for c in local.network_controlplane : c.controlplane_lb]) : ip if length(split(":", ip)) > 1]
+  cpv4s = flatten([for cp in azurerm_network_interface.controlplane :
+    [for ip in cp.ip_configuration : ip.private_ip_address if ip.private_ip_address_version == "IPv4"]
+  ])
+  cpv6s = flatten([for cp in azurerm_network_interface.controlplane :
+    [for ip in cp.ip_configuration : ip.private_ip_address if ip.private_ip_address_version == "IPv6"]
+  ])
 }
 
 resource "azurerm_public_ip" "controlplane_v4" {
@@ -120,7 +126,7 @@ resource "local_file" "controlplane" {
       azurerm_public_ip.controlplane_v4[each.key].ip_address,
     ])
     ipAliases   = compact(each.value.network.controlplane_lb)
-    nodeSubnets = [cidrsubnet(each.value.network.cidr[0], 1, 0), "!${each.value.network.controlplane_lb[0]}"]
+    nodeSubnets = [cidrsubnet(each.value.network.cidr[0], 1, 0)]
 
     ccm = templatefile("${path.module}/deployments/azure.json.tpl", {
       subscriptionId = local.subscription_id
@@ -187,10 +193,18 @@ resource "azurerm_linux_virtual_machine" "controlplane" {
   }
 }
 
 resource "azurerm_role_assignment" "controlplane" {
-  for_each             = local.controlplanes
+  for_each = { for k in flatten([
+    for cp in azurerm_linux_virtual_machine.controlplane : [
+      for role in var.controlplane_role_definition : {
+        name : "role-${cp.name}-${role}"
+        role : role
+        principal : cp.identity[0].principal_id
+      }
+    ]
+  ]) : k.name => k }
   scope                = "/subscriptions/${local.subscription_id}"
-  role_definition_name = var.controlplane_role_definition
-  principal_id         = azurerm_linux_virtual_machine.controlplane[each.key].identity[0].principal_id
+  role_definition_name = each.value.role
+  principal_id         = each.value.principal
 }
 
 locals {
@@ -203,18 +217,18 @@ resource "azurerm_private_dns_a_record" "controlplane" {
   resource_group_name = local.resource_group
   zone_name           = each.key
   ttl                 = 300
-  records             = local.lbv4s
+  records             = length(local.lbv4s) > 0 ? local.lbv4s : local.cpv4s
 
   tags = merge(var.tags, { type = "infra" })
 }
 
 resource "azurerm_private_dns_aaaa_record" "controlplane" {
-  for_each            = toset(values({ for zone, name in local.network : zone => name.dns if name.dns != "" && length(local.lbv6s) > 0 }))
+  for_each            = toset(values({ for zone, name in local.network : zone => name.dns if name.dns != "" && length(local.cpv6s) > 0 }))
   name                = split(".", var.kubernetes["apiDomain"])[0]
   resource_group_name = local.resource_group
   zone_name           = each.key
   ttl                 = 300
-  records             = local.lbv6s
+  records             = length(local.lbv6s) > 0 ? local.lbv6s : local.cpv6s
 
   tags = merge(var.tags, { type = "infra" })
 }
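Note: the role-assignment change above fans a single assignment per VM out into one assignment per VM/role pair. A minimal standalone sketch of that flatten/for_each pattern, using hypothetical input values that are not taken from this repository:

locals {
  # hypothetical stand-ins for the control-plane VMs and role list used above
  vms   = { "cp-1" = "principal-id-1", "cp-2" = "principal-id-2" }
  roles = ["kubernetes-csi", "kubernetes-node-autoscaler"]

  assignments = { for k in flatten([
    for vm, principal in local.vms : [
      for role in local.roles : {
        name : "role-${vm}-${role}"
        role : role
        principal : principal
      }
    ]
  ]) : k.name => k }
  # => one map entry per VM/role pair, suitable as for_each in azurerm_role_assignment
}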
@@ -7,14 +7,18 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" {
   for_each = { for idx, name in local.regions : name => idx }
   location = each.key
 
   instances            = lookup(try(var.instances[each.key], {}), "db_count", 0)
   name                 = "db-${lower(each.key)}"
   computer_name_prefix = "db-${lower(each.key)}-"
   resource_group_name  = local.resource_group
   sku                  = lookup(try(var.instances[each.key], {}), "db_type", "Standard_B2s")
   provision_vm_agent   = false
   overprovision        = false
-  platform_fault_domain_count = 2
+  platform_fault_domain_count  = 5
+  proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id
+
+  # zone_balance = true
+  # zones        = ["0", "1", "2"]
 
   network_interface {
     name = "db-${lower(each.key)}"
@@ -44,7 +48,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" {
 
   custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
     merge(var.kubernetes, {
-      lbv4        = local.network_controlplane[each.key].controlplane_lb[0]
+      lbv4        = try(local.network_controlplane[each.key].controlplane_lb[0], "")
      labels      = local.db_labels
      nodeSubnets = [local.network_public[each.key].cidr[0]]
    })
@@ -62,7 +66,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" {
     disk_size_gb = 50
   }
 
-  source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "db_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
+  source_image_id = data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.instances[each.key], {}), "db_type", ""))) > 0 ? "Arm64" : "x64"].id
   # source_image_reference {
   #   publisher = "talos"
   #   offer     = "Talos"
@@ -14,9 +14,12 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
   sku                  = lookup(try(var.instances[each.key], {}), "web_type", "Standard_B2s")
   provision_vm_agent   = false
   overprovision        = false
-  platform_fault_domain_count  = 2
+  platform_fault_domain_count  = 5
   proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id
 
+  # zone_balance = false
+  # zones        = ["1"]
+
   # health_probe_id = local.network_public[each.key].sku != "Basic" ? azurerm_lb_probe.web[each.key].id : null
   # automatic_instance_repair {
   #   enabled = local.network_public[each.key].sku != "Basic"
@@ -51,7 +54,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
 
   custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
     merge(var.kubernetes, {
-      lbv4        = local.network_controlplane[each.key].controlplane_lb[0]
+      lbv4        = try(local.network_controlplane[each.key].controlplane_lb[0], "")
      labels      = local.web_labels
      nodeSubnets = [local.network_public[each.key].cidr[0]]
    })
@@ -69,7 +72,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
     disk_size_gb = 50
   }
 
-  source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "worker_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
+  source_image_id = data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.instances[each.key], {}), "web_type", ""))) > 0 ? "Arm64" : "x64"].id
   # source_image_reference {
   #   publisher = "talos"
   #   offer     = "Talos"
@@ -14,9 +14,29 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" {
|
|||||||
sku = lookup(try(var.instances[each.key], {}), "worker_type", "Standard_B2s")
|
sku = lookup(try(var.instances[each.key], {}), "worker_type", "Standard_B2s")
|
||||||
provision_vm_agent = false
|
provision_vm_agent = false
|
||||||
overprovision = false
|
overprovision = false
|
||||||
platform_fault_domain_count = 2
|
platform_fault_domain_count = 5
|
||||||
proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id
|
proximity_placement_group_id = azurerm_proximity_placement_group.common[each.key].id
|
||||||
|
|
||||||
|
# zone_balance = false
|
||||||
|
# zones = ["1"]
|
||||||
|
|
||||||
|
# extension_operations_enabled = true
|
||||||
|
# extension {
|
||||||
|
# name = "KubeletHealth"
|
||||||
|
# publisher = "Microsoft.ManagedServices"
|
||||||
|
# type = "ApplicationHealthLinux"
|
||||||
|
# type_handler_version = "1.0"
|
||||||
|
# auto_upgrade_minor_version = false
|
||||||
|
|
||||||
|
# settings = jsonencode({
|
||||||
|
# protocol : "http"
|
||||||
|
# port : "10248"
|
||||||
|
# requestPath : "/healthz"
|
||||||
|
# intervalInSeconds : 60
|
||||||
|
# numberOfProbes : 3
|
||||||
|
# })
|
||||||
|
# }
|
||||||
|
|
||||||
network_interface {
|
network_interface {
|
||||||
name = "worker-${lower(each.key)}"
|
name = "worker-${lower(each.key)}"
|
||||||
primary = true
|
primary = true
|
||||||
@@ -46,7 +66,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" {
|
|||||||
|
|
||||||
custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
|
custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
|
||||||
merge(var.kubernetes, {
|
merge(var.kubernetes, {
|
||||||
lbv4 = local.network_controlplane[each.key].controlplane_lb[0]
|
lbv4 = try(local.network_controlplane[each.key].controlplane_lb[0], "")
|
||||||
labels = local.worker_labels
|
labels = local.worker_labels
|
||||||
nodeSubnets = [local.network_private[each.key].cidr[0]]
|
nodeSubnets = [local.network_private[each.key].cidr[0]]
|
||||||
})
|
})
|
||||||
@@ -72,7 +92,7 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
source_image_id = data.azurerm_shared_image_version.talos[startswith(lookup(try(var.instances[each.key], {}), "worker_type", ""), "Standard_D2p") ? "Arm64" : "x64"].id
|
source_image_id = data.azurerm_shared_image_version.talos[length(regexall("^Standard_[DE][\\d+]p", lookup(try(var.instances[each.key], {}), "worker_type", ""))) > 0 ? "Arm64" : "x64"].id
|
||||||
# source_image_reference {
|
# source_image_reference {
|
||||||
# publisher = "talos"
|
# publisher = "talos"
|
||||||
# offer = "Talos"
|
# offer = "Talos"
|
||||||
@@ -79,8 +79,8 @@ resource "azurerm_linux_virtual_machine" "router" {

 source_image_reference {
 publisher = "Debian"
-offer = "debian-11"
-sku = "11-gen2"
+offer = "debian-12"
+sku = "12-gen2"
 version = "latest"
 }

@@ -1,6 +1,6 @@

 resource "azurerm_lb" "controlplane" {
-for_each = { for idx, name in var.regions : name => idx }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
 location = each.key
 name = "controlplane-${each.key}"
 resource_group_name = var.resource_group
@@ -22,7 +22,7 @@ resource "azurerm_lb" "controlplane" {
 }

 resource "azurerm_lb_probe" "controlplane" {
-for_each = { for idx, name in var.regions : name => idx }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
 name = "controlplane-tcp-probe"
 loadbalancer_id = azurerm_lb.controlplane[each.key].id
 interval_in_seconds = 30
@@ -31,19 +31,19 @@ resource "azurerm_lb_probe" "controlplane" {
 }

 resource "azurerm_lb_backend_address_pool" "controlplane_v4" {
-for_each = { for idx, name in var.regions : name => idx }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
 loadbalancer_id = azurerm_lb.controlplane[each.key].id
 name = "controlplane-pool-v4"
 }

 resource "azurerm_lb_backend_address_pool" "controlplane_v6" {
-for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) && try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
 loadbalancer_id = azurerm_lb.controlplane[each.key].id
 name = "controlplane-pool-v6"
 }

 resource "azurerm_lb_rule" "kubernetes_v4" {
-for_each = { for idx, name in var.regions : name => idx }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
 name = "controlplane-v4"
 loadbalancer_id = azurerm_lb.controlplane[each.key].id
 frontend_ip_configuration_name = "controlplane-lb-v4"
@@ -57,7 +57,7 @@ resource "azurerm_lb_rule" "kubernetes_v4" {
 }

 resource "azurerm_lb_rule" "kubernetes_v6" {
-for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) && try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
 name = "controlplane-v6"
 loadbalancer_id = azurerm_lb.controlplane[each.key].id
 frontend_ip_configuration_name = "controlplane-lb-v6"
@@ -71,7 +71,7 @@ resource "azurerm_lb_rule" "kubernetes_v6" {
 }

 resource "azurerm_lb_rule" "talos" {
-for_each = { for idx, name in var.regions : name => idx }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) }
 name = "controlplane-talos-v4"
 loadbalancer_id = azurerm_lb.controlplane[each.key].id
 frontend_ip_configuration_name = "controlplane-lb-v4"
@@ -85,7 +85,7 @@ resource "azurerm_lb_rule" "talos" {
 }

 resource "azurerm_lb_rule" "talos_v6" {
-for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
+for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_enable, false) && try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
 name = "controlplane-talos-v6"
 loadbalancer_id = azurerm_lb.controlplane[each.key].id
 frontend_ip_configuration_name = "controlplane-lb-v6"
@@ -79,6 +79,8 @@ resource "azurerm_virtual_network_peering" "peering" {
 allow_virtual_network_access = true
 allow_forwarded_traffic = true
 allow_gateway_transit = false
+
+depends_on = [azurerm_virtual_network.main]
 }

 resource "azurerm_route_table" "main" {
@@ -16,50 +16,50 @@ output "resource_group" {

 output "network" {
 description = "The network"
-value = { for zone, net in azurerm_virtual_network.main : zone => {
+value = { for region, net in azurerm_virtual_network.main : region => {
 name = net.name
-nat = try(azurerm_public_ip.nat[zone].ip_address, "")
+nat = try(azurerm_public_ip.nat[region].ip_address, "")
 dns = try(azurerm_private_dns_zone.main[0].name, "")
-peering = try(azurerm_linux_virtual_machine.router[zone].private_ip_addresses, [])
+peering = try(azurerm_linux_virtual_machine.router[region].private_ip_addresses, [])
 } }
 }

 output "network_controlplane" {
 description = "The controlplane network"
-value = { for zone, subnet in azurerm_subnet.controlplane : zone => {
+value = { for region, subnet in azurerm_subnet.controlplane : region => {
 network_id = subnet.id
 cidr = subnet.address_prefixes
-sku = azurerm_lb.controlplane[zone].sku
-controlplane_pool_v4 = try(azurerm_lb_backend_address_pool.controlplane_v4[zone].id, "")
-controlplane_pool_v6 = try(azurerm_lb_backend_address_pool.controlplane_v6[zone].id, "")
-controlplane_lb = azurerm_lb.controlplane[zone].private_ip_addresses
+sku = try(var.capabilities[region].network_lb_sku, "Basic")
+controlplane_pool_v4 = try(var.capabilities[region].network_lb_enable, false) ? try(azurerm_lb_backend_address_pool.controlplane_v4[region].id, "") : ""
+controlplane_pool_v6 = try(var.capabilities[region].network_lb_enable, false) ? try(azurerm_lb_backend_address_pool.controlplane_v6[region].id, "") : ""
+controlplane_lb = try(var.capabilities[region].network_lb_enable, false) ? azurerm_lb.controlplane[region].private_ip_addresses : []
 } }
 }

 output "network_public" {
 description = "The public network"
-value = { for zone, subnet in azurerm_subnet.public : zone => {
+value = { for region, subnet in azurerm_subnet.public : region => {
 network_id = subnet.id
 cidr = subnet.address_prefixes
-sku = var.capabilities[zone].network_gw_sku
+sku = var.capabilities[region].network_gw_sku
 } }
 }

 output "network_private" {
 description = "The private network"
-value = { for zone, subnet in azurerm_subnet.private : zone => {
+value = { for region, subnet in azurerm_subnet.private : region => {
 network_id = subnet.id
 cidr = subnet.address_prefixes
-nat = try(azurerm_public_ip.nat[zone].ip_address, "")
-sku = try(azurerm_public_ip.nat[zone].ip_address, "") == "" ? "Standard" : var.capabilities[zone].network_gw_sku
+nat = try(azurerm_public_ip.nat[region].ip_address, "")
+sku = try(azurerm_public_ip.nat[region].ip_address, "") == "" ? "Standard" : var.capabilities[region].network_gw_sku
 } }
 }

 output "secgroups" {
 description = "List of secgroups"
-value = { for zone, subnet in azurerm_subnet.private : zone => {
-common = azurerm_network_security_group.common[zone].id
-controlplane = azurerm_network_security_group.controlplane[zone].id
-web = azurerm_network_security_group.web[zone].id
+value = { for region, subnet in azurerm_subnet.private : region => {
+common = azurerm_network_security_group.common[region].id
+controlplane = azurerm_network_security_group.controlplane[region].id
+web = azurerm_network_security_group.web[region].id
 } }
 }
@@ -72,6 +72,7 @@ variable "capabilities" {
 },
 "uksouth" = {
 network_nat_enable = false,
+network_lb_enable = false
 network_lb_sku = "Basic", # Standard
 network_gw_enable = false,
 network_gw_type = "Standard_B1s",
@@ -79,6 +80,7 @@ variable "capabilities" {
 },
 "ukwest" = {
 network_nat_enable = false,
+network_lb_enable = false
 network_lb_sku = "Basic",
 network_gw_enable = false,
 network_gw_type = "Standard_B1s",
@@ -19,8 +19,10 @@ machine:
 routes:
 - network: ::/0
 gateway: fe80::1234:5678:9abc
+%{if length(ipAliases) > 0 }
 - interface: lo
 addresses: ${format("%#v",ipAliases)}
+%{endif}
 - interface: dummy0
 addresses:
 - 169.254.2.53/32
@@ -85,13 +87,13 @@ cluster:
 election-timeout: "5000"
 heartbeat-interval: "1000"
 inlineManifests:
-- name: azure-cloud-controller-config
+- name: azure-managed-identity
 contents: |-
 apiVersion: v1
 kind: Secret
 type: Opaque
 metadata:
-name: azure-cloud-controller-manager
+name: azure-managed-identity
 namespace: kube-system
 data:
 azure.json: ${base64encode(ccm)}
@@ -100,9 +102,8 @@ cluster:
 manifests:
 - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/talos-cloud-controller-manager-result.yaml
 - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-cloud-controller-manager.yaml
-- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-csi-node.yaml
-- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-csi.yaml
-- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azure-storage.yaml
+- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azuredisk-csi-driver-result.yaml
+- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/azuredisk-storage.yaml
 - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/metrics-server-result.yaml
 - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-ns.yaml
 - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-result.yaml
@@ -28,10 +28,15 @@ machine:
 - interface: dummy0
 addresses:
 - 169.254.2.53/32
+%{if lbv4 != "" }
 extraHostEntries:
 - ip: ${lbv4}
 aliases:
 - ${apiDomain}
+%{endif}
+time:
+servers:
+- time.cloudflare.com
 install:
 wipe: false
 sysctls:
@@ -1,6 +1,6 @@

 variable "controlplane_role_definition" {
-default = "kubernetes-ccm"
+default = ["kubernetes-ccm", "kubernetes-csi", "kubernetes-node-autoscaler"]
 }

 variable "gallery_name" {