From b335bf7b1d27ce3b5b3191fcd80c3eb6feff7967 Mon Sep 17 00:00:00 2001
From: Serge Logvinov
Date: Wed, 15 Feb 2023 19:44:16 +0200
Subject: [PATCH] routing fixes

---
 azure/Makefile                                 |  6 +++---
 azure/deployments/azure-autoscaler-result.yaml | 18 +++++++++---------
 azure/deployments/cilium-result.yaml           |  6 +++---
 azure/outputs.tf                               |  2 +-
 exoscale/versions.tf                           |  2 +-
 openstack/images/README.md                     |  2 +-
 openstack/images/images.tf                     |  2 +-
 openstack/prepare/network.tf                   |  9 ++++++---
 8 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/azure/Makefile b/azure/Makefile
index 7da145a..50f9ebf 100644
--- a/azure/Makefile
+++ b/azure/Makefile
@@ -1,6 +1,6 @@
-ENDPOINT := ${shell terraform output -raw controlplane_endpoint_public 2>/dev/null}
-ifeq ($(ENDPOINT),)
+ENDPOINT ?= $(shell terraform output -no-color -raw controlplane_endpoint_public 2>/dev/null)
+ifneq (,$(findstring Warning,${ENDPOINT}))
 ENDPOINT := 127.0.0.1
 endif
 
@@ -33,7 +33,7 @@ create-templates:
 	@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json
 
 create-deployments:
-	helm template --namespace=kube-system --version=1.12.6 -f deployments/cilium.yaml cilium \
+	helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
 		cilium/cilium > deployments/cilium-result.yaml
 	helm template --namespace=kube-system -f deployments/azure-autoscaler.yaml cluster-autoscaler-azure \
 		autoscaler/cluster-autoscaler > deployments/azure-autoscaler-result.yaml
diff --git a/azure/deployments/azure-autoscaler-result.yaml b/azure/deployments/azure-autoscaler-result.yaml
index 7c13436..a2d392e 100644
--- a/azure/deployments/azure-autoscaler-result.yaml
+++ b/azure/deployments/azure-autoscaler-result.yaml
@@ -7,7 +7,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:
@@ -26,7 +26,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 automountServiceAccountToken: true
@@ -39,7 +39,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
 rules:
   - apiGroups:
@@ -180,7 +180,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -199,7 +199,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 rules:
@@ -228,7 +228,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 roleRef:
@@ -248,7 +248,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:
@@ -272,7 +272,7 @@ metadata:
     app.kubernetes.io/instance: "cluster-autoscaler-azure"
     app.kubernetes.io/name: "azure-cluster-autoscaler"
     app.kubernetes.io/managed-by: "Helm"
-    helm.sh/chart: "cluster-autoscaler-9.23.0"
+    helm.sh/chart: "cluster-autoscaler-9.24.0"
   name: cluster-autoscaler-azure
   namespace: kube-system
 spec:
@@ -291,7 +291,7 @@ spec:
       dnsPolicy: "ClusterFirst"
       containers:
         - name: azure-cluster-autoscaler
-          image: "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.26.1"
+          image: "registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.1"
           imagePullPolicy: "IfNotPresent"
           command:
             - ./cluster-autoscaler
diff --git a/azure/deployments/cilium-result.yaml b/azure/deployments/cilium-result.yaml
index 81e1714..0440fab 100644
--- a/azure/deployments/cilium-result.yaml
+++ b/azure/deployments/cilium-result.yaml
@@ -551,7 +551,7 @@ spec:
     spec:
       containers:
         - name: cilium-agent
-          image: "quay.io/cilium/cilium:v1.12.6@sha256:454134506b0448c756398d3e8df68d474acde2a622ab58d0c7e8b272b5867d0d"
+          image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
           imagePullPolicy: IfNotPresent
           command:
             - cilium-agent
@@ -686,7 +686,7 @@ spec:
               mountPath: /run/xtables.lock
       initContainers:
         - name: clean-cilium-state
-          image: "quay.io/cilium/cilium:v1.12.6@sha256:454134506b0448c756398d3e8df68d474acde2a622ab58d0c7e8b272b5867d0d"
+          image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
           imagePullPolicy: IfNotPresent
           command:
             - /init-container.sh
@@ -820,7 +820,7 @@ spec:
     spec:
      containers:
         - name: cilium-operator
-          image: "quay.io/cilium/operator-generic:v1.12.6@sha256:eec4430d222cb2967d42d3b404d2606e66468de47ae85e0a3ca3f58f00a5e017"
+          image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
           imagePullPolicy: IfNotPresent
           command:
             - cilium-operator-generic
diff --git a/azure/outputs.tf b/azure/outputs.tf
index bf2f591..25ee03a 100644
--- a/azure/outputs.tf
+++ b/azure/outputs.tf
@@ -6,7 +6,7 @@ output "controlplane_endpoint" {
 
 output "controlplane_endpoint_public" {
   description = "Kubernetes controlplane endpoint public"
-  value       = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "")
+  value       = try(flatten([for c in module.controlplane : c.controlplane_endpoints])[0], "127.0.0.1")
 }
 
 output "web_endpoint" {
diff --git a/exoscale/versions.tf b/exoscale/versions.tf
index 4b2fa49..dda713d 100644
--- a/exoscale/versions.tf
+++ b/exoscale/versions.tf
@@ -3,7 +3,7 @@ terraform {
   required_providers {
     exoscale = {
       source  = "exoscale/exoscale"
-      version = ">= 0.41.0"
+      version = ">= 0.45.0"
     }
     talos = {
       source  = "siderolabs/talos"
diff --git a/openstack/images/README.md b/openstack/images/README.md
index 3d543e0..da19c91 100644
--- a/openstack/images/README.md
+++ b/openstack/images/README.md
@@ -10,7 +10,7 @@ regions = ["GRA7", "GRA9"]
 ```
 
 ```shell
-wget https://github.com/siderolabs/talos/releases/download/v1.3.0/openstack-amd64.tar.gz
+wget https://github.com/siderolabs/talos/releases/download/v1.3.4/openstack-amd64.tar.gz
 tar -xzf openstack-amd64.tar.gz
 
 terraform init && terraform apply -auto-approve
diff --git a/openstack/images/images.tf b/openstack/images/images.tf
index 16e1f3d..3ca0601 100644
--- a/openstack/images/images.tf
+++ b/openstack/images/images.tf
@@ -7,7 +7,7 @@ resource "openstack_images_image_v2" "talos" {
   disk_format = "raw"
   min_disk_gb = 5
   min_ram_mb  = 1
-  tags        = ["talos-1.3.0"]
+  tags        = ["talos-1.3.4"]
 
   properties = {
     hw_qemu_guest_agent = "no"
diff --git a/openstack/prepare/network.tf b/openstack/prepare/network.tf
index 957e9db..c762298 100644
--- a/openstack/prepare/network.tf
+++ b/openstack/prepare/network.tf
@@ -61,21 +61,24 @@ resource "openstack_networking_subnet_v2" "private_v6" {
 }
 
 resource "openstack_networking_subnet_route_v2" "public_v4" {
-  for_each         = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+  for_each         = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
+  region           = each.key
   subnet_id        = openstack_networking_subnet_v2.public[each.key].id
   destination_cidr = var.network_cidr
   next_hop         = try(var.capabilities[each.key].gateway, false) ? cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 2) : cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 1)
 }
 
 resource "openstack_networking_subnet_route_v2" "private_v4" {
-  for_each         = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+  for_each         = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
+  region           = each.key
   subnet_id        = openstack_networking_subnet_v2.private[each.key].id
   destination_cidr = var.network_cidr
   next_hop         = try(var.capabilities[each.key].gateway, false) ? cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 2) : cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, 1)
 }
 
 resource "openstack_networking_subnet_route_v2" "private_v6" {
-  for_each         = { for idx, name in var.regions : name => idx if try(var.capabilities[name].gateway, false) && data.openstack_networking_quota_v2.quota[name].router > 0 }
+  for_each         = { for idx, name in var.regions : name => idx if data.openstack_networking_quota_v2.quota[name].router > 0 }
+  region           = each.key
   subnet_id        = openstack_networking_subnet_v2.private_v6[each.key].id
   destination_cidr = local.network_cidr_v6
   next_hop         = cidrhost(openstack_networking_subnet_v2.private_v6[each.key].cidr, 1)