Add web nodes + lb

Serge Logvinov
2022-05-20 16:50:46 +03:00
parent 138bbd59e4
commit be300ad329
15 changed files with 924 additions and 12 deletions


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx


@@ -0,0 +1,440 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: "true"
client-body-timeout: "30"
client-header-timeout: "30"
enable-access-log-for-default-backend: "true"
error-log-level: "error"
hsts: "true"
hsts-include-subdomains: "true"
hsts-max-age: "31536000"
hsts-preload: "true"
http-redirect-code: "301"
limit-req-status-code: "429"
log-format-escape-json: "true"
log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
proxy-connect-timeout: "10"
proxy-headers-hash-bucket-size: "128"
proxy-hide-headers: "strict-transport-security"
proxy-read-timeout: "60"
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
proxy-send-timeout: "60"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
server-tokens: "false"
ssl-protocols: "TLSv1.3"
upstream-keepalive-connections: "32"
use-forwarded-headers: "true"
use-geoip: "false"
use-geoip2: "false"
use-gzip: "true"
worker-cpu-affinity: "auto"
worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: ClusterIP
clusterIP: None
ipFamilyPolicy: RequireDualStack
ipFamilies:
- IPv4
- IPv6
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 2
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 15
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "k8s.gcr.io/ingress-nginx/controller:v1.2.0@sha256:d8196e3bc1e72547c5dec66d6556c0ff92a23f6d0919b206be170bc90d5f9185"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding are required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.1.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.2.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx


@@ -0,0 +1,116 @@
controller:
kind: DaemonSet
hostNetwork: true
hostPort:
enabled: false
ports:
http: 80
https: 443
dnsPolicy: ClusterFirstWithHostNet
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
publishService:
enabled: false
config:
worker-processes: "auto"
worker-cpu-affinity: "auto"
error-log-level: "error"
server-tokens: "false"
http-redirect-code: "301"
use-gzip: "true"
use-geoip: "false"
use-geoip2: "false"
use-forwarded-headers: "true"
# curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
enable-access-log-for-default-backend: "true"
log-format-escape-json: "true"
log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'
upstream-keepalive-connections: "32"
proxy-connect-timeout: "10"
proxy-read-timeout: "60"
proxy-send-timeout: "60"
ssl-protocols: "TLSv1.3"
hsts: "true"
hsts-max-age: "31536000"
hsts-include-subdomains: "true"
hsts-preload: "true"
proxy-hide-headers: "strict-transport-security"
proxy-headers-hash-bucket-size: "128"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
limit-req-status-code: "429"
client-header-timeout: "30"
client-body-timeout: "30"
minReadySeconds: 15
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "10254"
extraEnvs:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 30
readinessProbe:
periodSeconds: 30
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
service:
enabled: true
type: ClusterIP
clusterIP: None
ipFamilyPolicy: "RequireDualStack"
ipFamilies:
- IPv4
- IPv6
admissionWebhooks:
enabled: false
metrics:
enabled: false
revisionHistoryLimit: 2
defaultBackend:
enabled: false
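
For reference, the rendered ingress-result.yaml above can be reproduced from this values file. A minimal sketch, assuming the upstream chart repo at https://kubernetes.github.io/ingress-nginx and the values saved locally as ingress.yaml (file name assumed):

# Render chart version 4.1.1 (matching the helm.sh/chart labels above)
# without installing anything; the output is what is committed as ingress-result.yaml.
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm template ingress-nginx ingress-nginx/ingress-nginx \
  --version 4.1.1 \
  --namespace ingress-nginx \
  -f ingress.yaml > ingress-result.yaml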

azure/instances-web.tf (new file, 153 lines)

@@ -0,0 +1,153 @@
resource "azurerm_public_ip" "web_v4" {
for_each = { for idx, name in local.regions : name => idx }
location = each.key
name = "web-${lower(each.key)}-v4"
resource_group_name = local.resource_group
sku = local.network_public[each.key].sku
allocation_method = local.network_public[each.key].sku == "Standard" ? "Static" : "Dynamic"
tags = merge(var.tags, { type = "web" })
}
resource "azurerm_lb" "web" {
for_each = { for idx, name in local.regions : name => idx }
location = each.key
name = "web-${lower(each.key)}"
resource_group_name = local.resource_group
sku = local.network_public[each.key].sku
frontend_ip_configuration {
name = "web-lb-v4"
public_ip_address_id = azurerm_public_ip.web_v4[each.key].id
}
tags = merge(var.tags, { type = "web" })
}
resource "azurerm_lb_backend_address_pool" "web_v4" {
for_each = { for idx, name in local.regions : name => idx }
loadbalancer_id = azurerm_lb.web[each.key].id
name = "web-pool-v4"
}
resource "azurerm_lb_probe" "web" {
for_each = { for idx, name in local.regions : name => idx }
name = "web-http-probe"
loadbalancer_id = azurerm_lb.web[each.key].id
interval_in_seconds = 30
protocol = "Http"
request_path = "/healthz"
port = 80
}
resource "azurerm_lb_rule" "web_http_v4" {
for_each = { for idx, name in local.regions : name => idx }
name = "web-http-v4"
loadbalancer_id = azurerm_lb.web[each.key].id
frontend_ip_configuration_name = "web-lb-v4"
probe_id = azurerm_lb_probe.web[each.key].id
backend_address_pool_ids = [azurerm_lb_backend_address_pool.web_v4[each.key].id]
enable_floating_ip = false
protocol = "Tcp"
frontend_port = 80
backend_port = 80
idle_timeout_in_minutes = 30
enable_tcp_reset = local.network_public[each.key].sku != "Basic"
}
resource "azurerm_lb_rule" "web_https_v4" {
for_each = { for idx, name in local.regions : name => idx }
name = "web-https-v4"
loadbalancer_id = azurerm_lb.web[each.key].id
frontend_ip_configuration_name = "web-lb-v4"
probe_id = azurerm_lb_probe.web[each.key].id
backend_address_pool_ids = [azurerm_lb_backend_address_pool.web_v4[each.key].id]
enable_floating_ip = false
protocol = "Tcp"
frontend_port = 443
backend_port = 443
idle_timeout_in_minutes = 30
enable_tcp_reset = local.network_public[each.key].sku != "Basic"
}
locals {
web_labels = "topology.kubernetes.io/zone=azure,project.io/node-pool=web"
}
resource "azurerm_linux_virtual_machine_scale_set" "web" {
for_each = { for idx, name in local.regions : name => idx }
location = each.key
instances = lookup(try(var.instances[each.key], {}), "web_count", 0)
name = "web-${lower(each.key)}"
computer_name_prefix = "web-${lower(each.key)}-"
resource_group_name = local.resource_group
sku = lookup(try(var.instances[each.key], {}), "web_instance_type", "Standard_B2s")
extensions_time_budget = "PT30M"
provision_vm_agent = false
# availability_set_id = var.instance_availability_set
network_interface {
name = "web-${lower(each.key)}"
primary = true
network_security_group_id = local.network_secgroup[each.key].web
ip_configuration {
name = "web-${lower(each.key)}-v4"
primary = true
version = "IPv4"
subnet_id = local.network_public[each.key].network_id
load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.web_v4[each.key].id]
}
ip_configuration {
name = "web-${lower(each.key)}-v6"
version = "IPv6"
subnet_id = local.network_public[each.key].network_id
}
}
custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
lbv4 = local.network_public[each.key].controlplane_lb[0]
labels = "topology.kubernetes.io/region=${each.key},${local.web_labels}"
nodeSubnets = [local.network_public[each.key].cidr[0]]
})
))
admin_username = "talos"
admin_ssh_key {
username = "talos"
public_key = file("~/.ssh/terraform.pub")
}
source_image_id = data.azurerm_image.talos[each.key].id
os_disk {
caching = "ReadOnly"
storage_account_type = "StandardSSD_LRS"
disk_size_gb = 50
}
tags = merge(var.tags, { type = "web" })
boot_diagnostics {}
lifecycle {
ignore_changes = [admin_username, admin_ssh_key, os_disk, source_image_id, tags]
}
}
# resource "local_file" "web" {
# for_each = { for idx, name in local.regions : name => idx }
# content = templatefile("${path.module}/templates/worker.yaml.tpl",
# merge(var.kubernetes, {
# lbv4 = local.network_public[each.key].controlplane_lb[0]
# labels = "topology.kubernetes.io/region=${each.key},${local.web_labels}"
# nodeSubnets = [local.network_public[each.key].cidr[0]]
# })
# )
# filename = "_cfgs/web-${lower(each.key)}.yaml"
# file_permission = "0600"
# }
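
The scale set above creates zero instances until web_count is raised (see the var.instances defaults near the end of this diff). Once instances join, the Azure probe checks /healthz on port 80 of each backend, a path the ingress-nginx default server is expected to answer with 200. A hypothetical smoke test against the regional LB, with <lb-ip> standing in for the azurerm_public_ip.web_v4 address (no Terraform output for it is defined in this diff):

# Probe path used by azurerm_lb_probe.web above
curl -i http://<lb-ip>/healthz
# TLS rule on 443; -k because only the controller's self-signed fake cert is served
curl -ik https://<lb-ip>/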

azure/instances-werker.tf (new file, 71 lines)

@@ -0,0 +1,71 @@
# locals {
# worker_labels = "topology.kubernetes.io/zone=azure,project.io/node-pool=worker"
# }
# resource "azurerm_linux_virtual_machine_scale_set" "worker" {
# for_each = { for idx, name in local.regions : name => idx }
# location = each.key
# instances = lookup(try(var.instances[each.key], {}), "worker_count", 0)
# name = "worker-${lower(each.key)}"
# computer_name_prefix = "worker-${lower(each.key)}-"
# resource_group_name = local.resource_group
# sku = lookup(try(var.instances[each.key], {}), "worker_instance_type", "Standard_B2s")
# extensions_time_budget = "PT30M"
# provision_vm_agent = false
# # availability_set_id = var.instance_availability_set
# network_interface {
# name = "worker-${lower(each.key)}"
# primary = true
# ip_configuration {
# name = "worker-${lower(each.key)}-v4"
# primary = true
# version = "IPv4"
# subnet_id = local.network_private[each.key].network_id
# }
# ip_configuration {
# name = "worker-${lower(each.key)}-v6"
# version = "IPv6"
# subnet_id = local.network_private[each.key].network_id
# }
# }
# custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
# merge(var.kubernetes, {
# lbv4 = local.network_public[each.key].controlplane_lb[0]
# labels = "topology.kubernetes.io/region=${each.key},${local.worker_labels}"
# nodeSubnets = [local.network_private[each.key].cidr[0]]
# })
# ))
# os_disk {
# caching = "ReadOnly"
# storage_account_type = "StandardSSD_LRS"
# disk_size_gb = 50
# }
# disable_password_authentication = false
# admin_password = "talos4PWD"
# admin_username = "talos"
# admin_ssh_key {
# username = "talos"
# public_key = file("~/.ssh/terraform.pub")
# }
# source_image_id = data.azurerm_image.talos[each.key].id
# # source_image_reference {
# # publisher = "Debian"
# # offer = "debian-11"
# # sku = "11-gen2"
# # version = "latest"
# # }
# tags = merge(var.tags, { type = "worker" })
# boot_diagnostics {}
# lifecycle {
# ignore_changes = [admin_username, admin_ssh_key, os_disk, source_image_id, tags]
# }
# }


@@ -40,7 +40,7 @@ resource "azurerm_network_interface" "controlplane" {
private_ip_address = cidrhost(ip_configuration.value, var.instance_ip_start + count.index)
private_ip_address_version = length(split(".", ip_configuration.value)) > 1 ? "IPv4" : "IPv6"
private_ip_address_allocation = "Static"
- public_ip_address_id = length(split(".", ip_configuration.value)) > 1 ? azurerm_public_ip.controlplane_v4[count.index].id : azurerm_public_ip.controlplane_v6[count.index].id
+ public_ip_address_id = length(split(".", ip_configuration.value)) > 1 ? azurerm_public_ip.controlplane_v4[count.index].id : try(azurerm_public_ip.controlplane_v6[count.index].id, "")
}
}
@@ -99,10 +99,13 @@ resource "azurerm_linux_virtual_machine" "controlplane" {
azurerm_public_ip.controlplane_v4[count.index].ip_address,
try(azurerm_public_ip.controlplane_v6[count.index].ip_address, ""),
])
- nodeSubnets = var.network_internal.cidr
+ ipAliases = compact([var.instance_params["lbv4"], var.instance_params["lbv6"]])
+ nodeSubnets = [var.network_internal.cidr[0]]
})
))
# vtpm_enabled = false
# encryption_at_host_enabled = true
os_disk {
name = "controlplane-${lower(var.region)}-${1 + count.index}-boot"
caching = "ReadOnly"
@@ -150,7 +153,8 @@ resource "local_file" "controlplane" {
azurerm_public_ip.controlplane_v4[count.index].ip_address,
try(azurerm_public_ip.controlplane_v6[count.index].ip_address, ""),
])
- nodeSubnets = var.network_internal.cidr
+ ipAliases = compact([var.instance_params["lbv4"], var.instance_params["lbv6"]])
+ nodeSubnets = [var.network_internal.cidr[0]]
})
)
filename = "_cfgs/controlplane-${lower(var.region)}-${1 + count.index}.yaml"


@@ -38,6 +38,8 @@ output "network_private" {
output "secgroups" {
description = "List of secgroups"
value = { for zone, subnet in azurerm_subnet.private : zone => {
common = azurerm_network_security_group.common[zone].id
controlplane = azurerm_network_security_group.controlplane[zone].id
web = azurerm_network_security_group.web[zone].id
} }
}


@@ -1,10 +1,4 @@
resource "azurerm_subnet_network_security_group_association" "public" {
for_each = { for idx, name in var.regions : name => idx }
subnet_id = azurerm_subnet.public[each.key].id
network_security_group_id = azurerm_network_security_group.common[each.key].id
}
resource "azurerm_subnet_network_security_group_association" "private" {
for_each = { for idx, name in var.regions : name => idx }
subnet_id = azurerm_subnet.private[each.key].id


@@ -5,6 +5,21 @@ resource "azurerm_network_security_group" "controlplane" {
name = "controlplane-${each.key}"
resource_group_name = azurerm_resource_group.kubernetes.name
dynamic "security_rule" {
for_each = var.whitelist_admin
content {
name = "Icmp-${security_rule.key}"
priority = 1000 + security_rule.key
direction = "Inbound"
access = "Allow"
protocol = "Icmp"
source_port_range = "*"
source_address_prefix = security_rule.value
destination_port_range = "*"
destination_address_prefix = "*"
}
}
dynamic "security_rule" {
for_each = var.whitelist_admin
content {
@@ -15,7 +30,7 @@ resource "azurerm_network_security_group" "controlplane" {
protocol = "Tcp"
source_port_range = "*"
source_address_prefix = security_rule.value
- destination_port_ranges = ["6443", "50000-50001"]
+ destination_port_ranges = ["6443", "50000-50001", "22"]
destination_address_prefix = "*"
}
}


@@ -0,0 +1,39 @@
resource "azurerm_network_security_group" "web" {
for_each = { for idx, name in var.regions : name => idx }
location = each.key
name = "web-${each.key}"
resource_group_name = azurerm_resource_group.kubernetes.name
dynamic "security_rule" {
for_each = var.whitelist_admin
content {
name = "Icmp-${security_rule.key}"
priority = 1000 + security_rule.key
direction = "Inbound"
access = "Allow"
protocol = "Icmp"
source_port_range = "*"
source_address_prefix = security_rule.value
destination_port_range = "*"
destination_address_prefix = "*"
}
}
dynamic "security_rule" {
for_each = var.whitelist_admin
content {
name = "WhitelistAdmin-${security_rule.key}"
priority = 1500 + security_rule.key
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
source_address_prefix = security_rule.value
destination_port_ranges = ["80", "443", "22"]
destination_address_prefix = "*"
}
}
tags = merge(var.tags, { type = "infra" })
}


@@ -63,7 +63,7 @@ variable "capabilities" {
default = {
"uksouth" = {
network_nat_enable = false,
- network_lb_type = "Standard",
+ network_lb_type = "Basic",
network_gw_enable = true,
network_gw_type = "Standard_B1s",


@@ -17,6 +17,9 @@ machine:
network:
hostname: "${name}"
interfaces:
- interface: eth0
dhcp: true
addresses: ${format("%#v",ipAliases)}
- interface: dummy0
addresses:
- 169.254.2.53/32
@@ -58,3 +61,5 @@ cluster:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/metrics-server.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/local-path-storage.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/ingress-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/azure/deployments/ingress-result.yaml
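
Talos downloads every URL in extraManifests during cluster bootstrap and applies it, so the two new ingress manifests are installed without any manual kubectl apply. A quick sanity check once the web nodes register (sketch):

kubectl -n ingress-nginx get daemonset,service
kubectl -n ingress-nginx logs -l app.kubernetes.io/component=controller --tail=20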


@@ -0,0 +1,48 @@
version: v1alpha1
debug: false
persist: true
machine:
type: worker
token: ${tokenMachine}
ca:
crt: ${caMachine}
kubelet:
extraArgs:
cloud-provider: external
node-labels: "${labels}"
rotate-server-certificates: true
nodeIP:
validSubnets: ${format("%#v",nodeSubnets)}
clusterDNS:
- 169.254.2.53
- fd00::169:254:2:53
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
network:
interfaces:
- interface: dummy0
addresses:
- 169.254.2.53/32
- fd00::169:254:2:53/128
extraHostEntries:
- ip: ${lbv4}
aliases:
- ${apiDomain}
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
install:
wipe: false
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${lbv4}:6443
clusterName: ${clusterName}
network:
dnsDomain: ${domain}
serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
proxy:
disabled: true
token: ${token}
ca:
crt: ${ca}


@@ -30,7 +30,7 @@ variable "controlplane" {
default = {
"uksouth" = {
count = 0,
- instance_type = "Standard_D2as_v4",
+ instance_type = "Standard_B2s",
},
"ukwest" = {
count = 0,
@@ -56,3 +56,22 @@ variable "kubernetes" {
}
sensitive = true
}
variable "instances" {
description = "Map of region's properties"
type = map(any)
default = {
"uksouth" = {
web_count = 0,
web_instance_type = "Standard_B2s",
worker_count = 0,
worker_instance_type = "Standard_B4ms", # B4ms E2as_v4
},
"ukwest" = {
web_count = 0,
web_instance_type = "Standard_B2s",
worker_count = 0,
worker_instance_type = "Standard_B4ms", # B4ms E2as_v4
},
}
}
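
Both counts default to 0, so no web or worker VMs are created until the map is overridden. A minimal per-region override, assuming a terraform.tfvars next to this module (keys must match the region names; missing keys fall back to the lookup() defaults in instances-web.tf):

# terraform.tfvars (sketch)
instances = {
  "uksouth" = {
    web_count         = 2
    web_instance_type = "Standard_B2s"
  }
}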


@@ -37,6 +37,8 @@ machine:
install:
wipe: false
cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${lbv4}:6443
clusterName: ${clusterName}