routing fixes
@@ -1,6 +1,6 @@
-ENDPOINT := ${shell terraform output -raw controlplane_endpoint_public 2>/dev/null}
-ifeq ($(ENDPOINT),)
+ENDPOINT ?= $(shell terraform output -no-color -raw controlplane_endpoint_public 2>/dev/null)
+ifneq (,$(findstring Warning,${ENDPOINT}))
 ENDPOINT := 127.0.0.1
 endif

@@ -33,7 +33,7 @@ create-templates:
 	@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json

 create-deployments:
-	helm template --namespace=kube-system --version=1.12.6 -f deployments/cilium.yaml cilium \
+	helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
 		cilium/cilium > deployments/cilium-result.yaml
 	helm template --namespace=kube-system -f deployments/azure-autoscaler.yaml cluster-autoscaler-azure \
 		autoscaler/cluster-autoscaler > deployments/azure-autoscaler-result.yaml

@@ -7,7 +7,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:

@@ -26,7 +26,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 automountServiceAccountToken: true

@@ -39,7 +39,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
 rules:
   - apiGroups:

@@ -180,7 +180,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
 roleRef:
   apiGroup: rbac.authorization.k8s.io

@@ -199,7 +199,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 rules:

@@ -228,7 +228,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 roleRef:

@@ -248,7 +248,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:

@@ -272,7 +272,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:

@@ -291,7 +291,7 @@ spec:
       dnsPolicy: "ClusterFirst"
       containers:
         - name: azure-cluster-autoscaler
-          image: "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.26.1"
+          image: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.1"
          imagePullPolicy: "IfNotPresent"
           command:
             - ./cluster-autoscaler

@@ -551,7 +551,7 @@ spec:
     spec:
       containers:
       - name: cilium-agent
-        image: "quay.io/cilium/cilium:v1.12.6@sha256:454134506b0448c756398d3e8df68d474acde2a622ab58d0c7e8b272b5867d0d"
+        image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
         imagePullPolicy: IfNotPresent
         command:
         - cilium-agent

@@ -686,7 +686,7 @@ spec:
         mountPath: /run/xtables.lock
       initContainers:
       - name: clean-cilium-state
-        image: "quay.io/cilium/cilium:v1.12.6@sha256:454134506b0448c756398d3e8df68d474acde2a622ab58d0c7e8b272b5867d0d"
+        image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
         imagePullPolicy: IfNotPresent
         command:
         - /init-container.sh

@@ -820,7 +820,7 @@ spec:
     spec:
      containers:
       - name: cilium-operator
-        image: "quay.io/cilium/operator-generic:v1.12.6@sha256:eec4430d222cb2967d42d3b404d2606e66468de47ae85e0a3ca3f58f00a5e017"
+        image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
         imagePullPolicy: IfNotPresent
         command:
         - cilium-operator-generic

@@ -6,7 +6,7 @@ output "controlplane_endpoint" {

 output "controlplane_endpoint_public" {
   description = "Kubernetes controlplane endpoint public"
-  value = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "")
+  value = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "127.0.0.1")
 }

 output "web_endpoint" {

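Note on the changed fallback above: `try()` returns its second argument when indexing the flattened endpoint list fails, so `controlplane_endpoint_public` now resolves to `127.0.0.1` rather than an empty string while no controlplane instances exist yet. A minimal sketch of that behaviour (the local list below is an illustrative stand-in, not part of the module):

```hcl
# Illustrative stand-in for the module.controlplane endpoint outputs.
locals {
  controlplane_endpoints = [] # assume no controlplane instances exist yet
}

output "controlplane_endpoint_public" {
  # flatten([[]])[0] fails with an invalid-index error on the empty list,
  # so try() falls back to the loopback address.
  value = try(flatten([local.controlplane_endpoints])[0], "127.0.0.1")
}
```
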
@@ -3,7 +3,7 @@ terraform {
   required_providers {
     exoscale = {
       source = "exoscale/exoscale"
-      version = ">= 0.41.0"
+      version = ">= 0.45.0"
     }
     talos = {
       source = "siderolabs/talos"

@@ -10,7 +10,7 @@ regions = ["GRA7", "GRA9"]
 ```

 ```shell
-wget https://github.com/siderolabs/talos/releases/download/v1.3.0/openstack-amd64.tar.gz
+wget https://github.com/siderolabs/talos/releases/download/v1.3.4/openstack-amd64.tar.gz
 tar -xzf openstack-amd64.tar.gz

 terraform init && terraform apply -auto-approve

@@ -7,7 +7,7 @@ resource "openstack_images_image_v2" "talos" {
   disk_format = "raw"
   min_disk_gb = 5
   min_ram_mb = 1
-  tags = ["talos-1.3.0"]
+  tags = ["talos-1.3.4"]

   properties = {
     hw_qemu_guest_agent = "no"

@@ -61,21 +61,24 @@ resource "openstack_networking_subnet_v2" "private_v6" {
 }

 resource "openstack_networking_subnet_route_v2" "public_v4" {
-  for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+  for_each = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
   region = each.key
   subnet_id = openstack_networking_subnet_v2.public[each.key].id
   destination_cidr = var.network_cidr
   next_hop = try(var.capabilities[each.key].gateway, false) ? cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 2) : cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 1)
 }

 resource "openstack_networking_subnet_route_v2" "private_v4" {
-  for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+  for_each = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
   region = each.key
   subnet_id = openstack_networking_subnet_v2.private[each.key].id
   destination_cidr = var.network_cidr
   next_hop = try(var.capabilities[each.key].gateway, false) ? cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 2) : cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 1)
 }

 resource "openstack_networking_subnet_route_v2" "private_v6" {
-  for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+  for_each = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
   region = each.key
   subnet_id = openstack_networking_subnet_v2.private_v6[each.key].id
   destination_cidr = local.network_cidr_v6
   next_hop = cidrhost(openstack_networking_subnet_v2.private_v6[each.key].cidr, 1)

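The routing hunk above drops the gateway requirement from `for_each`, so the subnet routes are created in every region whose quota allows a router; the `next_hop` ternary then sends traffic in gateway regions to host 2 of the private subnet and everywhere else to host 1. A minimal sketch of that selection, assuming a hypothetical capabilities map and a /24 private CIDR (names and addresses are illustrative, not taken from this module):

```hcl
# Illustrative only: stand-ins for var.capabilities and the private subnet CIDR.
locals {
  capabilities = {
    gra7 = { gateway = true }
    gra9 = {} # no gateway flag for this region
  }
  private_cidr = "172.16.1.0/24"

  # Same selection as the routes above: gateway regions forward to host 2,
  # all other regions to host 1 of the private subnet.
  next_hop = {
    for name, caps in local.capabilities :
    name => try(caps.gateway, false) ? cidrhost(local.private_cidr, 2) : cidrhost(local.private_cidr, 1)
  }
  # => { gra7 = "172.16.1.2", gra9 = "172.16.1.1" }
}
```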