mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-10-29 01:22:29 +00:00)
routing fixes
@@ -1,6 +1,6 @@
 
-ENDPOINT := ${shell terraform output -raw controlplane_endpoint_public 2>/dev/null}
-ifeq ($(ENDPOINT),)
+ENDPOINT ?= $(shell terraform output -no-color -raw controlplane_endpoint_public 2>/dev/null)
+ifneq (,$(findstring Warning,${ENDPOINT}))
 ENDPOINT := 127.0.0.1
 endif
 
@@ -33,7 +33,7 @@ create-templates:
 @yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json
 
 create-deployments:
-helm template --namespace=kube-system --version=1.12.6 -f deployments/cilium.yaml cilium \
+helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
 cilium/cilium > deployments/cilium-result.yaml
 helm template --namespace=kube-system -f deployments/azure-autoscaler.yaml cluster-autoscaler-azure \
 autoscaler/cluster-autoscaler > deployments/azure-autoscaler-result.yaml

@@ -7,7 +7,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 namespace: kube-system
 spec:
@@ -26,7 +26,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 namespace: kube-system
 automountServiceAccountToken: true
@@ -39,7 +39,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 rules:
 - apiGroups:
@@ -180,7 +180,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 roleRef:
 apiGroup: rbac.authorization.k8s.io
@@ -199,7 +199,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 namespace: kube-system
 rules:
@@ -228,7 +228,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 namespace: kube-system
 roleRef:
@@ -248,7 +248,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 namespace: kube-system
 spec:
@@ -272,7 +272,7 @@ metadata:
 app.kubernetes.io/instance: "cluster-autoscaler-azure"
 app.kubernetes.io/name: "azure-cluster-autoscaler"
 app.kubernetes.io/managed-by: "Helm"
-helm.sh/chart: "cluster-autoscaler-9.23.0"
+helm.sh/chart: "cluster-autoscaler-9.24.0"
 name: cluster-autoscaler-azure
 namespace: kube-system
 spec:
@@ -291,7 +291,7 @@ spec:
 dnsPolicy: "ClusterFirst"
 containers:
 - name: azure-cluster-autoscaler
-image: "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.26.1"
+image: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.1"
 imagePullPolicy: "IfNotPresent"
 command:
 - ./cluster-autoscaler

@@ -551,7 +551,7 @@ spec:
 spec:
 containers:
 - name: cilium-agent
-image: "quay.io/cilium/cilium:v1.12.6@sha256:454134506b0448c756398d3e8df68d474acde2a622ab58d0c7e8b272b5867d0d"
+image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
 imagePullPolicy: IfNotPresent
 command:
 - cilium-agent
@@ -686,7 +686,7 @@ spec:
 mountPath: /run/xtables.lock
 initContainers:
 - name: clean-cilium-state
-image: "quay.io/cilium/cilium:v1.12.6@sha256:454134506b0448c756398d3e8df68d474acde2a622ab58d0c7e8b272b5867d0d"
+image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
 imagePullPolicy: IfNotPresent
 command:
 - /init-container.sh
@@ -820,7 +820,7 @@ spec:
 spec:
 containers:
 - name: cilium-operator
-image: "quay.io/cilium/operator-generic:v1.12.6@sha256:eec4430d222cb2967d42d3b404d2606e66468de47ae85e0a3ca3f58f00a5e017"
+image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
 imagePullPolicy: IfNotPresent
 command:
 - cilium-operator-generic

@@ -6,7 +6,7 @@ output "controlplane_endpoint" {
|
|||||||
|
|
||||||
output "controlplane_endpoint_public" {
|
output "controlplane_endpoint_public" {
|
||||||
description = "Kubernetes controlplane endpoint public"
|
description = "Kubernetes controlplane endpoint public"
|
||||||
value = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "")
|
value = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "127.0.0.1")
|
||||||
}
|
}
|
||||||
|
|
||||||
output "web_endpoint" {
|
output "web_endpoint" {
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ terraform {
 required_providers {
 exoscale = {
 source = "exoscale/exoscale"
-version = ">= 0.41.0"
+version = ">= 0.45.0"
 }
 talos = {
 source = "siderolabs/talos"

@@ -10,7 +10,7 @@ regions = ["GRA7", "GRA9"]
 ```
 
 ```shell
-wget https://github.com/siderolabs/talos/releases/download/v1.3.0/openstack-amd64.tar.gz
+wget https://github.com/siderolabs/talos/releases/download/v1.3.4/openstack-amd64.tar.gz
 tar -xzf openstack-amd64.tar.gz
 
 terraform init && terraform apply -auto-approve

@@ -7,7 +7,7 @@ resource "openstack_images_image_v2" "talos" {
 disk_format = "raw"
 min_disk_gb = 5
 min_ram_mb = 1
-tags = ["talos-1.3.0"]
+tags = ["talos-1.3.4"]
 
 properties = {
 hw_qemu_guest_agent = "no"

@@ -61,21 +61,24 @@ resource "openstack_networking_subnet_v2" "private_v6" {
 }
 
 resource "openstack_networking_subnet_route_v2" "public_v4" {
-for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+for_each = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
+region = each.key
 subnet_id = openstack_networking_subnet_v2.public[each.key].id
 destination_cidr = var.network_cidr
 next_hop = try(var.capabilities[each.key].gateway, false) ? cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 2) : cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 1)
 }
 
 resource "openstack_networking_subnet_route_v2" "private_v4" {
-for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+for_each = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
+region = each.key
 subnet_id = openstack_networking_subnet_v2.private[each.key].id
 destination_cidr = var.network_cidr
 next_hop = try(var.capabilities[each.key].gateway, false) ? cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 2) : cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 1)
 }
 
 resource "openstack_networking_subnet_route_v2" "private_v6" {
-for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+for_each = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
+region = each.key
 subnet_id = openstack_networking_subnet_v2.private_v6[each.key].id
 destination_cidr = local.network_cidr_v6
 next_hop = cidrhost(openstack_networking_subnet_v2.private_v6[each.key].cidr, 1)