mirror of https://github.com/optim-enterprises-bv/terraform-talos.git
synced 2025-10-29 17:42:47 +00:00
labels, NATed nodes, lb
@@ -176,9 +176,9 @@ spec:
tolerations:
- key: "node.cloudprovider.kubernetes.io/uninitialized"
value: "true"
effect: "NoSchedule"
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
operator: Exists
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
affinity:

@@ -223,7 +223,7 @@ spec:
memory: 32Mi
requests:
cpu: 10m
memory: 12Mi
memory: 16Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:

@@ -245,46 +245,6 @@ spec:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
spec:
allowPrivilegeEscalation: false
forbiddenSysctls:
- '*'
fsGroup:
ranges:
- max: 65534
min: 65534
rule: MustRunAs
hostIPC: false
hostNetwork: false
hostPID: false
privileged: false
readOnlyRootFilesystem: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65534
min: 65534
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
ranges:
- max: 65534
min: 65534
rule: MustRunAs
volumes:
- downwardAPI
- secret
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
@@ -127,6 +127,9 @@ spec:
labels:
k8s-app: metrics-server
spec:
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/master: ""
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"

@@ -172,8 +175,6 @@ spec:
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
@@ -145,6 +145,7 @@ spec:
- --cloud-provider=scaleway
- --leader-elect=true
- --allow-untagged-cloud
- --controllers=cloud-node,cloud-node-lifecycle
resources:
requests:
cpu: 100m
@@ -26,7 +26,6 @@ resource "scaleway_instance_server" "controlplane" {
cloud-init = templatefile("${path.module}/templates/controlplane.yaml",
merge(var.kubernetes, {
name = "master-${count.index + 1}"
type = "controlplane"
ipv4_vip = local.ipv4_vip
ipv4_local = cidrhost(local.main_subnet, 11 + count.index)
lbv4 = local.lbv4

@@ -34,6 +33,7 @@ resource "scaleway_instance_server" "controlplane" {
labels = "${local.controlplane_labels},node.kubernetes.io/instance-type=${lookup(var.controlplane, "type", "DEV1-M")}"
access = var.scaleway_access
secret = var.scaleway_secret
region = "fr-par"
project_id = var.scaleway_project_id
})
)

@@ -48,6 +48,13 @@ resource "scaleway_instance_server" "controlplane" {
}
}

resource "scaleway_vpc_public_gateway_dhcp_reservation" "controlplane" {
count = lookup(var.controlplane, "count", 0)
gateway_network_id = scaleway_vpc_gateway_network.main.id
mac_address = scaleway_instance_server.controlplane[count.index].private_network.0.mac_address
ip_address = cidrhost(local.main_subnet, 11 + count.index)
}

resource "scaleway_instance_placement_group" "controlplane" {
name = "controlplane"
policy_type = "max_availability"
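Note on the addressing scheme used above: cidrhost() carves deterministic private IPs out of local.main_subnet (offset 11 for control-plane nodes), so the cloud-init template, the DHCP reservation and the load-balancer backend all refer to the same addresses instead of whatever the DHCP server would hand out. A minimal sketch, assuming main_subnet is something like "172.16.0.0/24" (the real value comes from the module's locals; controlplane_ips is a hypothetical name used only for illustration):

```hcl
locals {
  # One deterministic address per control-plane node: .11, .12, .13, ...
  controlplane_ips = [
    for i in range(lookup(var.controlplane, "count", 0)) : cidrhost(local.main_subnet, 11 + i)
  ]
}

# With main_subnet = "172.16.0.0/24" and count = 3 this yields
# ["172.16.0.11", "172.16.0.12", "172.16.0.13"].
```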
@@ -1,29 +1,33 @@

# FIXME: does not work without enable_dynamic_ip
locals {
web_labels = "topology.kubernetes.io/region=fr-par,topology.kubernetes.io/zone=${var.regions[0]},project.io/node-pool=web"
}

resource "scaleway_instance_server" "web" {
count = lookup(var.instances, "web_count", 0)
name = "web-${count.index + 1}"
image = data.scaleway_instance_image.talos.id
type = lookup(var.instances, "web_instance_type", "DEV1-M")
enable_ipv6 = true
enable_dynamic_ip = false
security_group_id = scaleway_instance_security_group.web.id
tags = concat(var.tags, ["web"])
count = lookup(var.instances, "web_count", 0)
name = "web-${count.index + 1}"
image = data.scaleway_instance_image.talos.id
type = lookup(var.instances, "web_type", "DEV1-M")
enable_ipv6 = true
enable_dynamic_ip = false
security_group_id = scaleway_instance_security_group.web.id
placement_group_id = scaleway_instance_placement_group.web.id
tags = concat(var.tags, ["web"])

private_network {
pn_id = scaleway_vpc_private_network.main.id
}

user_data = {
cloud-init = templatefile("${path.module}/templates/web.yaml.tpl",
cloud-init = templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
name = "web-${count.index + 1}"
type = "worker"
ipv4_vip = local.ipv4_vip
ipv4 = cidrhost(local.main_subnet, 21 + count.index)
ipv4_gw = cidrhost(local.main_subnet, 1)
clusterDns = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
nodeSubnets = local.main_subnet
labels = "topology.kubernetes.io/region=fr-par"
labels = "${local.web_labels},node.kubernetes.io/instance-type=${lookup(var.instances, "web_type", "DEV1-M")}"
})
)
}

@@ -36,3 +40,16 @@ resource "scaleway_instance_server" "web" {
]
}
}

resource "scaleway_instance_placement_group" "web" {
name = "web"
policy_type = "max_availability"
policy_mode = "enforced"
}

resource "scaleway_vpc_public_gateway_dhcp_reservation" "web" {
count = lookup(var.instances, "web_count", 0)
gateway_network_id = scaleway_vpc_gateway_network.main.id
mac_address = scaleway_instance_server.web[count.index].private_network.0.mac_address
ip_address = cidrhost(local.main_subnet, 21 + count.index)
}
scaleway/instances-worker.tf (Normal file, 48 lines)
@@ -0,0 +1,48 @@

locals {
worker_labels = "topology.kubernetes.io/region=fr-par,topology.kubernetes.io/zone=${var.regions[0]},project.io/node-pool=worker"
}

resource "scaleway_instance_server" "worker" {
count = lookup(var.instances, "worker_count", 0)
name = "worker-${count.index + 1}"
image = data.scaleway_instance_image.talos.id
type = lookup(var.instances, "worker_type", "DEV1-M")
enable_ipv6 = true
enable_dynamic_ip = false
security_group_id = scaleway_instance_security_group.worker.id
tags = concat(var.tags, ["worker"])

private_network {
pn_id = scaleway_vpc_private_network.main.id
}

user_data = {
cloud-init = templatefile("${path.module}/templates/worker.yaml.tpl",
merge(var.kubernetes, {
name = "worker-${count.index + 1}"
ipv4_vip = local.ipv4_vip
ipv4 = cidrhost(local.main_subnet, 31 + count.index)
ipv4_gw = cidrhost(local.main_subnet, 1)
clusterDns = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
nodeSubnets = local.main_subnet
labels = "${local.worker_labels},node.kubernetes.io/instance-type=${lookup(var.instances, "worker_type", "DEV1-M")}"
})
)
}

lifecycle {
ignore_changes = [
image,
type,
user_data,
]
}
}

resource "scaleway_vpc_public_gateway_dhcp_reservation" "worker" {
count = lookup(var.instances, "worker_count", 0)
gateway_network_id = scaleway_vpc_gateway_network.main.id
mac_address = scaleway_instance_server.worker[count.index].private_network.0.mac_address
ip_address = cidrhost(local.main_subnet, 31 + count.index)
}
@@ -29,12 +29,13 @@ resource "scaleway_lb_backend" "api" {
name = "api"
forward_protocol = "tcp"
forward_port = "6443"
server_ips = scaleway_instance_server.controlplane[*].private_ip
server_ips = [for k in range(0, lookup(var.controlplane, "count", 0)) : cidrhost(local.main_subnet, 11 + k)]

health_check_timeout = "5s"
health_check_delay = "30s"
health_check_https {
uri = "/readyz"
uri = "/readyz"
code = 401
}
}
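The api backend now targets the statically assigned cidrhost()-derived addresses rather than the servers' reported private_ip attribute. Since the DHCP reservations above pin exactly the same addresses, an equivalent formulation (a sketch, not what this commit does) would be to reference the reservations directly:

```hcl
server_ips = scaleway_vpc_public_gateway_dhcp_reservation.controlplane[*].ip_address
```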
@@ -55,3 +56,96 @@ resource "scaleway_lb_frontend" "api" {
}
}
}

resource "scaleway_lb_backend" "web" {
count = local.lb_enable ? 1 : 0
lb_id = scaleway_lb.lb[0].id
name = "web"
forward_protocol = "tcp"
forward_port = "80"
server_ips = [for k in range(0, lookup(var.instances, "web_count", 0)) : cidrhost(local.main_subnet, 21 + k)]

health_check_timeout = "5s"
health_check_delay = "30s"
health_check_http {
uri = "/healthz"
}
}

resource "scaleway_lb_backend" "web_https" {
count = local.lb_enable ? 1 : 0
lb_id = scaleway_lb.lb[0].id
name = "web"
forward_protocol = "tcp"
forward_port = "443"
server_ips = [for k in range(0, lookup(var.instances, "web_count", 0)) : cidrhost(local.main_subnet, 21 + k)]

health_check_timeout = "5s"
health_check_delay = "30s"
health_check_https {
uri = "/healthz"
}
}

resource "scaleway_lb_frontend" "http" {
count = local.lb_enable ? 1 : 0
lb_id = scaleway_lb.lb[0].id
backend_id = scaleway_lb_backend.web[0].id
name = "http"
inbound_port = "80"

acl {
name = "Allow controlplane IPs"
action {
type = "allow"
}
match {
ip_subnet = try(scaleway_instance_ip.controlplane[*].address, "0.0.0.0/0")
}
}
acl {
name = "Allow whitelist IPs"
action {
type = "allow"
}
match {
ip_subnet = concat(var.whitelist_web, var.whitelist_admins)
}
}
acl {
name = "Deny all"
action {
type = "deny"
}
match {
ip_subnet = ["0.0.0.0/0"]
}
}
}

resource "scaleway_lb_frontend" "https" {
count = local.lb_enable ? 1 : 0
lb_id = scaleway_lb.lb[0].id
backend_id = scaleway_lb_backend.web_https[0].id
name = "https"
inbound_port = "443"

acl {
name = "Allow whitelist IPs"
action {
type = "allow"
}
match {
ip_subnet = concat(var.whitelist_web, var.whitelist_admins)
}
}
acl {
name = "Deny all"
action {
type = "deny"
}
match {
ip_subnet = ["0.0.0.0/0"]
}
}
}
@@ -4,37 +4,47 @@ resource "scaleway_instance_security_group" "controlplane" {
inbound_default_policy = "drop"
outbound_default_policy = "accept"

dynamic "inbound_rule" {
for_each = ["50000", "6443", "2379", "2380"]

content {
action = "accept"
protocol = "TCP"
port = inbound_rule.value
}
inbound_rule {
action = "accept"
protocol = "ANY"
ip_range = local.main_subnet
}

dynamic "inbound_rule" {
for_each = ["50000", "6443"]

content {
action = "accept"
protocol = "TCP"
port = inbound_rule.value
ip_range = "::/0"
}
}

inbound_rule {
action = "accept"
protocol = "TCP"
port = 4240
ip_range = "::/0"
}
inbound_rule {
action = "accept"
protocol = "ANY"
ip_range = local.main_subnet

dynamic "inbound_rule" {
for_each = var.whitelist_admins

content {
action = "accept"
protocol = "TCP"
port = "6443"
ip_range = length(split("/", inbound_rule.value)) == 2 ? inbound_rule.value : "${inbound_rule.value}/32"
}
}
dynamic "inbound_rule" {
for_each = var.whitelist_admins

content {
action = "accept"
protocol = "TCP"
port = "50000"
ip_range = length(split("/", inbound_rule.value)) == 2 ? inbound_rule.value : "${inbound_rule.value}/32"
}
}

dynamic "inbound_rule" {
for_each = ["2379", "2380"]

content {
action = "accept"
protocol = "TCP"
port = inbound_rule.value
}
}

# KubeSpan
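The whitelist rules above normalise each entry of var.whitelist_admins so that a bare address becomes a /32 while an existing CIDR is passed through unchanged. A minimal sketch of that expression in isolation (admin_cidrs is a hypothetical local and the sample values are made up):

```hcl
locals {
  # "203.0.113.7"     -> "203.0.113.7/32"
  # "198.51.100.0/24" -> "198.51.100.0/24"
  admin_cidrs = [
    for ip in var.whitelist_admins :
    length(split("/", ip)) == 2 ? ip : "${ip}/32"
  ]
}
```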
@@ -72,12 +82,6 @@ resource "scaleway_instance_security_group" "web" {
}
}

inbound_rule {
action = "accept"
protocol = "TCP"
port = 4240
ip_range = "::/0"
}
inbound_rule {
action = "accept"
protocol = "ANY"

@@ -97,6 +101,12 @@ resource "scaleway_instance_security_group" "web" {
ip_range = "::/0"
}

inbound_rule {
action = "accept"
protocol = "TCP"
port = 4240
ip_range = "::/0"
}
inbound_rule {
action = "accept"
protocol = "ICMP"

@@ -127,4 +137,16 @@ resource "scaleway_instance_security_group" "worker" {
port = 51820
ip_range = "::/0"
}

inbound_rule {
action = "accept"
protocol = "TCP"
port = 4240
ip_range = "::/0"
}
inbound_rule {
action = "accept"
protocol = "ICMP"
ip_range = "::/0"
}
}
@@ -2,12 +2,11 @@ version: v1alpha1
debug: false
persist: true
machine:
type: ${type}
type: controlplane
certSANs:
- "${lbv4}"
- "${ipv4}"
- "${ipv4_local}"
- "${ipv4_vip}"
- "${apiDomain}"
kubelet:
extraArgs:
node-ip: "${ipv4_local}"

@@ -62,7 +61,7 @@ cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${ipv4_vip}:6443
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
discovery:
enabled: true

@@ -83,6 +82,7 @@ cluster:
certSANs:
- "${lbv4}"
- "${ipv4}"
- "${apiDomain}"
controllerManager:
extraArgs:
node-cidr-mask-size-ipv4: 24

@@ -103,7 +103,7 @@ cluster:
SCW_ACCESS_KEY: ${base64encode(access)}
SCW_SECRET_KEY: ${base64encode(secret)}
SCW_DEFAULT_PROJECT_ID: ${base64encode(project_id)}
SCW_DEFAULT_REGION: ${base64encode("fr-par")}
SCW_DEFAULT_REGION: ${base64encode(region)}
externalCloudProvider:
enabled: true
manifests:
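SCW_DEFAULT_REGION above now comes from the region template variable instead of a hard-coded "fr-par"; the Terraform side supplies it through the same merge() shown earlier for the control-plane instances. A condensed sketch of that wiring (keys copied from the instance resource in this commit, values are the module's own variables):

```hcl
user_data = {
  cloud-init = templatefile("${path.module}/templates/controlplane.yaml",
    merge(var.kubernetes, {
      access     = var.scaleway_access      # -> SCW_ACCESS_KEY
      secret     = var.scaleway_secret      # -> SCW_SECRET_KEY
      region     = "fr-par"                 # -> SCW_DEFAULT_REGION
      project_id = var.scaleway_project_id  # -> SCW_DEFAULT_PROJECT_ID
    })
  )
}
```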
@@ -18,6 +18,7 @@ machine:
nodeIP:
validSubnets: ${format("%#v",split(",",nodeSubnets))}
network:
hostname: "${name}"
interfaces:
- interface: eth0
dhcp: true

@@ -27,7 +28,12 @@ machine:
- network: 169.254.42.42/32
metric: 1024
- interface: eth1
dhcp: true
addresses:
- ${ipv4}/24
routes:
- network: 0.0.0.0/0
gateway: ${ipv4_gw}
metric: 512
- interface: dummy0
addresses:
- 169.254.2.53/32

@@ -53,7 +59,7 @@ cluster:
id: ${clusterID}
secret: ${clusterSecret}
controlPlane:
endpoint: https://${ipv4_vip}:6443
endpoint: https://${apiDomain}:6443
clusterName: ${clusterName}
discovery:
enabled: false
@@ -61,10 +61,10 @@ variable "instances" {
description = "Map of instance properties"
type = map(any)
default = {
web_count = 0,
web_instance_type = "DEV1-L",
worker_count = 0,
worker_instance_type = "DEV1-L",
web_count = 0,
web_type = "DEV1-L",
worker_count = 0,
worker_type = "DEV1-L",
}
}
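With the renamed keys, each pool is sized and typed through var.instances. A sketch of a terraform.tfvars override (example values only) that would create two web nodes and one worker:

```hcl
instances = {
  web_count    = 2
  web_type     = "DEV1-M"
  worker_count = 1
  worker_type  = "DEV1-L"
}
```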