Use local vip address

This commit is contained in:
Serge Logvinov
2021-08-18 17:29:35 +03:00
parent 3385750a01
commit 020159446e
9 changed files with 90 additions and 27 deletions

View File

@@ -1,6 +1,6 @@
---
-k8sServiceHost: "172.16.0.5"
+k8sServiceHost: "172.16.0.10"
k8sServicePort: "6443"
agent:

View File

@@ -540,7 +540,7 @@ spec:
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
-value: "172.16.0.5"
+value: "172.16.0.10"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: "quay.io/cilium/cilium:v1.10.3@sha256:8419531c5d3677158802882bdfe2297915c43f2ebe3649551aaac22de9f6d565"
@@ -627,7 +627,7 @@ spec:
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
-value: "172.16.0.5"
+value: "172.16.0.10"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: "quay.io/cilium/cilium:v1.10.3@sha256:8419531c5d3677158802882bdfe2297915c43f2ebe3649551aaac22de9f6d565"
@@ -783,7 +783,7 @@ spec:
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
-value: "172.16.0.5"
+value: "172.16.0.10"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
image: "quay.io/cilium/operator-generic:v1.10.3@sha256:337ebf27eae4fbad51cc4baf9110b3ec6753320dd33075bc136e2a1865be5eb5"

View File

@@ -19,6 +19,7 @@ resource "hcloud_server" "controlplane" {
merge(var.kubernetes, {
name = "master-${count.index + 1}"
type = count.index == 0 ? "init" : "controlplane"
ipv4_vip = cidrhost(hcloud_network_subnet.core.ip_range, 10)
ipv4_local = cidrhost(hcloud_network_subnet.core.ip_range, 11 + count.index)
lbv4_local = hcloud_load_balancer_network.api.ip
lbv4 = hcloud_load_balancer.api.ipv4
@@ -38,6 +39,17 @@ resource "hcloud_server" "controlplane" {
}
}
#
# Local floating ip
#
resource "hcloud_server_network" "controlplane" {
count = lookup(var.controlplane, "count", 0) > 0 ? 1 : 0
server_id = hcloud_server.controlplane[0].id
subnet_id = hcloud_network_subnet.core.id
ip = cidrhost(hcloud_network_subnet.core.ip_range, 11)
alias_ips = [cidrhost(hcloud_network_subnet.core.ip_range, 10)]
}
resource "hcloud_load_balancer_target" "api" {
count = lookup(var.controlplane, "count", 0)
type = "server"

View File

@@ -16,6 +16,7 @@ module "web" {
vm_security_group = [hcloud_firewall.web.id]
vm_params = merge(var.kubernetes, {
lbv4 = hcloud_load_balancer_network.api.ip
lbv4 = hcloud_load_balancer_network.api.ip
labels = "node.kubernetes.io/role=web"
})
}

View File

@@ -16,6 +16,7 @@ module "worker" {
vm_security_group = [hcloud_firewall.worker.id]
vm_params = merge(var.kubernetes, {
lbv4 = hcloud_load_balancer_network.api.ip
lbv4 = hcloud_load_balancer_network.api.ip
labels = "node.kubernetes.io/role=worker"
})
}

View File

@@ -10,6 +10,7 @@ machine:
node-ip: "${ipv4}"
cloud-provider: external
rotate-server-certificates: true
node-labels: "${labels}"
network:
hostname: "${name}"
interfaces:

View File

@@ -32,18 +32,43 @@ resource "hcloud_load_balancer_service" "api" {
}
}
resource "hcloud_load_balancer_service" "talos" {
load_balancer_id = hcloud_load_balancer.api.id
protocol = "tcp"
listen_port = 50000
destination_port = 50000
proxyprotocol = false
# resource "hcloud_load_balancer_service" "talos" {
# load_balancer_id = hcloud_load_balancer.api.id
# protocol = "tcp"
# listen_port = 50000
# destination_port = 50000
# proxyprotocol = false
health_check {
protocol = "tcp"
port = 50000
interval = 30
timeout = 5
retries = 3
}
}
# health_check {
# protocol = "tcp"
# port = 50000
# interval = 30
# timeout = 5
# retries = 3
# }
# }
# resource "hcloud_load_balancer_service" "https" {
# load_balancer_id = hcloud_load_balancer.api.id
# protocol = "tcp"
# listen_port = 443
# destination_port = 443
# proxyprotocol = false
# health_check {
# protocol = "http"
# port = 80
# interval = 30
# timeout = 5
# retries = 3
# http {
# path = "/healthz"
# }
# }
# }
# resource "hcloud_load_balancer_target" "https" {
# type = "label_selector"
# load_balancer_id = hcloud_load_balancer.api.id
# label_selector = "label=web"
# }

View File

@@ -31,8 +31,7 @@ resource "hcloud_firewall" "controlplane" {
direction = "in"
protocol = "tcp"
port = "50000"
-source_ips = ["0.0.0.0/0", "::/0"]
-# source_ips = var.whitelist_admins
+source_ips = concat(var.whitelist_admins, [var.vpc_main_cidr])
}
rule {
direction = "in"
@@ -54,6 +53,14 @@ resource "hcloud_firewall" "controlplane" {
source_ips = ["0.0.0.0/0", "::/0"]
# source_ips = var.whitelist_admins
}
# cilium health
rule {
direction = "in"
protocol = "tcp"
port = "4240"
source_ips = ["::/0"]
}
}
resource "hcloud_firewall" "web" {
@@ -90,6 +97,14 @@ resource "hcloud_firewall" "web" {
port = "443"
source_ips = var.whitelist_web
}
# cilium health
rule {
direction = "in"
protocol = "tcp"
port = "4240"
source_ips = ["::/0"]
}
}
resource "hcloud_firewall" "worker" {
@@ -113,4 +128,12 @@ resource "hcloud_firewall" "worker" {
port = "any"
source_ips = [var.vpc_main_cidr]
}
# cilium health
rule {
direction = "in"
protocol = "tcp"
port = "4240"
source_ips = ["::/0"]
}
}

View File

@@ -8,6 +8,7 @@ machine:
- "${lbv6}"
- "${lbv4_local}"
- "${ipv4_local}"
- "${ipv4_vip}"
kubelet:
extraArgs:
node-ip: "${ipv4_local}"
@@ -17,16 +18,14 @@ machine:
interfaces:
- interface: eth1
dhcp: true
addresses:
- ${ipv4_vip}
- interface: dummy0
addresses:
- 169.254.2.53/32
- fd00::169:254:2:53/128
install:
disk: /dev/sda
bootloader: true
wipe: false
extraKernelArgs:
- elevator=noop
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
@@ -38,7 +37,7 @@ machine:
slot: 0
cluster:
controlPlane:
-endpoint: https://${lbv4}:6443
+endpoint: https://${ipv4_vip}:6443
network:
dnsDomain: ${domain}
podSubnets: ${format("[%s]",podSubnets)}
@@ -56,6 +55,7 @@ cluster:
- "${lbv6}"
- "${lbv4_local}"
- "${ipv4_local}"
- "${ipv4_vip}"
extraArgs:
feature-gates: IPv6DualStack=true
controllerManager: