Serge Logvinov
2023-02-16 19:12:03 +02:00
parent 6df1a74d41
commit 782f6184fe
18 changed files with 155 additions and 69 deletions

View File

@@ -186,6 +186,8 @@ data:
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-min-ttl: "3600"
tofqdns-proxy-response-max-delay: "100ms"
mtu: "1420"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
@@ -813,7 +815,7 @@ spec:
metadata:
annotations:
# ensure pods roll when configmap updates
cilium.io/cilium-configmap-checksum: "5e23bd083a707099be04c9822a92e7ebf88fb85dff685037eca164fcf07a0662"
cilium.io/cilium-configmap-checksum: "91a303965c397a04cb454230bd07a7565b12e96248534e33f52c8c2be5f59781"
labels:
io.cilium/app: operator
name: cilium-operator

View File

@@ -23,6 +23,8 @@ localRedirectPolicy: true
tunnel: "vxlan"
autoDirectNodeRoutes: false
devices: [eth+,wg+]
+ extraConfig:
+ mtu: "1420"
healthChecking: true

View File

@@ -34,10 +34,14 @@ resource "azurerm_linux_virtual_machine_scale_set" "db" {
name = "db-${lower(each.key)}-v6"
version = "IPv6"
subnet_id = local.network_public[each.key].network_id
- # public_ip_address {
- # name = "db-${lower(each.key)}-v6"
- # version = "IPv6"
- # }
+ dynamic "public_ip_address" {
+ for_each = local.network_public[each.key].sku == "Standard" ? ["IPv6"] : []
+ content {
+ name = "worker-${lower(each.key)}-v6"
+ version = public_ip_address.value
+ }
+ }
}
}
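Note: the dynamic "public_ip_address" block above only renders a per-instance IPv6 public IP when the subnet's SKU is "Standard". A minimal, self-contained sketch of that pattern (names and values are hypothetical, not from this repository): a dynamic block emits one nested block per element of its for_each, and nothing at all when the list is empty.

variable "sku" {
  type    = string
  default = "Basic" # switch to "Standard" to get ["IPv6"]
}

locals {
  # The same conditional used above: one element for the Standard SKU, none otherwise.
  ipv6_versions = var.sku == "Standard" ? ["IPv6"] : []
}

output "ipv6_versions" {
  value = local.ipv6_versions # a dynamic block fed by this list renders zero or one block
}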

View File

@@ -38,6 +38,14 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
name = "web-${lower(each.key)}-v6"
version = "IPv6"
subnet_id = local.network_public[each.key].network_id
dynamic "public_ip_address" {
for_each = local.network_public[each.key].sku == "Standard" ? ["IPv6"] : []
content {
name = "worker-${lower(each.key)}-v6"
version = public_ip_address.value
}
}
}
}

View File

@@ -32,6 +32,14 @@ resource "azurerm_linux_virtual_machine_scale_set" "worker" {
name = "worker-${lower(each.key)}-v6"
version = "IPv6"
subnet_id = local.network_private[each.key].network_id
dynamic "public_ip_address" {
for_each = local.network_private[each.key].sku == "Standard" ? ["IPv6"] : []
content {
name = "worker-${lower(each.key)}-v6"
version = public_ip_address.value
}
}
}
}

View File

@@ -5,20 +5,20 @@ resource "azurerm_public_ip" "router_v4" {
name = "router-${lower(each.key)}-v4"
resource_group_name = var.resource_group
ip_version = "IPv4"
- sku = azurerm_lb.controlplane[each.key].sku
- allocation_method = azurerm_lb.controlplane[each.key].sku == "Standard" ? "Static" : "Dynamic"
+ sku = var.capabilities[each.key].network_gw_sku
+ allocation_method = var.capabilities[each.key].network_gw_sku == "Standard" ? "Static" : "Dynamic"
tags = merge(var.tags, { type = "infra" })
}
resource "azurerm_public_ip" "router_v6" {
- for_each = { for idx, name in var.regions : name => idx if azurerm_lb.controlplane[name].sku == "Standard" && try(var.capabilities[name].network_gw_enable, false) }
+ for_each = { for idx, name in var.regions : name => idx if var.capabilities[name].network_gw_sku == "Standard" && try(var.capabilities[name].network_gw_enable, false) }
location = each.key
name = "router-${lower(each.key)}-v6"
resource_group_name = var.resource_group
ip_version = "IPv6"
- sku = azurerm_lb.controlplane[each.key].sku
- allocation_method = "Static"
+ sku = var.capabilities[each.key].network_gw_sku
+ allocation_method = var.capabilities[each.key].network_gw_sku == "Standard" ? "Static" : "Dynamic"
tags = merge(var.tags, { type = "infra" })
}
@@ -31,12 +31,12 @@ resource "azurerm_network_interface" "router" {
enable_ip_forwarding = true
dynamic "ip_configuration" {
- for_each = azurerm_subnet.controlplane[each.key].address_prefixes
+ for_each = azurerm_subnet.shared[each.key].address_prefixes
content {
name = "router-${lower(each.key)}-v${length(split(".", ip_configuration.value)) > 1 ? "4" : "6"}"
primary = length(split(".", ip_configuration.value)) > 1
- subnet_id = azurerm_subnet.controlplane[each.key].id
+ subnet_id = azurerm_subnet.shared[each.key].id
private_ip_address = cidrhost(ip_configuration.value, -2)
private_ip_address_version = length(split(".", ip_configuration.value)) > 1 ? "IPv4" : "IPv6"
private_ip_address_allocation = "Static"
@@ -68,7 +68,7 @@ resource "azurerm_linux_virtual_machine" "router" {
name = "router-${lower(each.key)}"
caching = "ReadOnly"
storage_account_type = "Standard_LRS"
- disk_size_gb = 32
+ disk_size_gb = 30
}
admin_username = "debian"

View File

@@ -4,10 +4,10 @@ resource "azurerm_lb" "controlplane" {
location = each.key
name = "controlplane-${each.key}"
resource_group_name = var.resource_group
- sku = try(var.capabilities[each.key].network_lb_type, "Basic")
+ sku = try(var.capabilities[each.key].network_lb_sku, "Basic")
dynamic "frontend_ip_configuration" {
- for_each = [for ip in azurerm_subnet.controlplane[each.key].address_prefixes : ip if try(var.capabilities[each.key].network_lb_type, "Basic") != "Basic" || length(split(".", ip)) > 1]
+ for_each = [for ip in azurerm_subnet.controlplane[each.key].address_prefixes : ip if try(var.capabilities[each.key].network_lb_sku, "Basic") != "Basic" || length(split(".", ip)) > 1]
content {
name = "controlplane-lb-v${length(split(".", frontend_ip_configuration.value)) > 1 ? "4" : "6"}"
@@ -37,7 +37,7 @@ resource "azurerm_lb_backend_address_pool" "controlplane_v4" {
}
resource "azurerm_lb_backend_address_pool" "controlplane_v6" {
- for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_type, "Basic") != "Basic" }
+ for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
loadbalancer_id = azurerm_lb.controlplane[each.key].id
name = "controlplane-pool-v6"
}
@@ -53,11 +53,11 @@ resource "azurerm_lb_rule" "kubernetes_v4" {
frontend_port = 6443
backend_port = 6443
idle_timeout_in_minutes = 30
- enable_tcp_reset = try(var.capabilities[each.key].network_lb_type, "Basic") != "Basic"
+ enable_tcp_reset = try(var.capabilities[each.key].network_lb_sku, "Basic") != "Basic"
}
resource "azurerm_lb_rule" "kubernetes_v6" {
- for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_type, "Basic") != "Basic" }
+ for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
name = "controlplane-v6"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
frontend_ip_configuration_name = "controlplane-lb-v6"
@@ -67,33 +67,33 @@ resource "azurerm_lb_rule" "kubernetes_v6" {
frontend_port = 6443
backend_port = 6443
idle_timeout_in_minutes = 30
- enable_tcp_reset = try(var.capabilities[each.key].network_lb_type, "Basic") != "Basic"
+ enable_tcp_reset = try(var.capabilities[each.key].network_lb_sku, "Basic") != "Basic"
}
# resource "azurerm_lb_rule" "talos" {
# for_each = { for idx, name in var.regions : name => idx }
# name = "controlplane-talos-v4"
# loadbalancer_id = azurerm_lb.controlplane[each.key].id
# frontend_ip_configuration_name = "controlplane-lb-v4"
# probe_id = azurerm_lb_probe.controlplane[each.key].id
# backend_address_pool_ids = [azurerm_lb_backend_address_pool.controlplane_v4[each.key].id]
# protocol = "Tcp"
# frontend_port = 50000
# backend_port = 50000
# idle_timeout_in_minutes = 30
# enable_tcp_reset = try(var.capabilities[each.key].network_lb_type, "Basic") != "Basic"
# }
resource "azurerm_lb_rule" "talos" {
for_each = { for idx, name in var.regions : name => idx }
name = "controlplane-talos-v4"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
frontend_ip_configuration_name = "controlplane-lb-v4"
probe_id = azurerm_lb_probe.controlplane[each.key].id
backend_address_pool_ids = [azurerm_lb_backend_address_pool.controlplane_v4[each.key].id]
protocol = "Tcp"
frontend_port = 50001
backend_port = 50001
idle_timeout_in_minutes = 30
enable_tcp_reset = try(var.capabilities[each.key].network_lb_sku, "Basic") != "Basic"
}
# resource "azurerm_lb_rule" "talos_v6" {
# for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_type, "Basic") != "Basic" }
# name = "controlplane-talos-v6"
# loadbalancer_id = azurerm_lb.controlplane[each.key].id
# frontend_ip_configuration_name = "controlplane-lb-v6"
# probe_id = azurerm_lb_probe.controlplane[each.key].id
# backend_address_pool_ids = [azurerm_lb_backend_address_pool.controlplane_v6[each.key].id]
# protocol = "Tcp"
# frontend_port = 50000
# backend_port = 50000
# idle_timeout_in_minutes = 30
# enable_tcp_reset = try(var.capabilities[each.key].network_lb_type, "Basic") != "Basic"
# }
resource "azurerm_lb_rule" "talos_v6" {
for_each = { for idx, name in var.regions : name => idx if try(var.capabilities[name].network_lb_sku, "Basic") != "Basic" }
name = "controlplane-talos-v6"
loadbalancer_id = azurerm_lb.controlplane[each.key].id
frontend_ip_configuration_name = "controlplane-lb-v6"
probe_id = azurerm_lb_probe.controlplane[each.key].id
backend_address_pool_ids = [azurerm_lb_backend_address_pool.controlplane_v6[each.key].id]
protocol = "Tcp"
frontend_port = 50001
backend_port = 50001
idle_timeout_in_minutes = 30
enable_tcp_reset = try(var.capabilities[each.key].network_lb_sku, "Basic") != "Basic"
}

View File

@@ -15,11 +15,21 @@ resource "azurerm_subnet" "controlplane" {
resource_group_name = var.resource_group
virtual_network_name = azurerm_virtual_network.main[each.key].name
address_prefixes = [
- for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, length(split(".", cidr)) > 1 ? 3 : 2, 0)
+ for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, length(split(".", cidr)) > 1 ? 4 : 2, 0)
]
service_endpoints = ["Microsoft.ContainerRegistry", "Microsoft.Storage"]
}
resource "azurerm_subnet" "shared" {
for_each = { for idx, name in var.regions : name => idx }
name = "shared"
resource_group_name = var.resource_group
virtual_network_name = azurerm_virtual_network.main[each.key].name
address_prefixes = [
for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, length(split(".", cidr)) > 1 ? 4 : 2, 1)
]
}
resource "azurerm_subnet" "services" {
for_each = { for idx, name in var.regions : name => idx }
name = "services"
@@ -36,7 +46,7 @@ resource "azurerm_subnet" "public" {
resource_group_name = var.resource_group
virtual_network_name = azurerm_virtual_network.main[each.key].name
address_prefixes = [
- for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 2, 1)
+ for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 2, 2)
]
service_endpoints = ["Microsoft.ContainerRegistry", "Microsoft.Storage"]
}
@@ -47,7 +57,7 @@ resource "azurerm_subnet" "private" {
resource_group_name = var.resource_group
virtual_network_name = azurerm_virtual_network.main[each.key].name
address_prefixes = [
- for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 2, 2)
+ for cidr in azurerm_virtual_network.main[each.key].address_space : cidrsubnet(cidr, 2, 3)
]
service_endpoints = ["Microsoft.ContainerRegistry", "Microsoft.Storage"]
}
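Note: the IPv4 newbits for the controlplane subnet change from 3 to 4, which makes room for the new "shared" subnet at netnum 1, and the public/private subnets shift to netnums 2 and 3. A worked sketch of the resulting layout under an assumed /16 address space (the VNet prefix is hypothetical, not from this repository; IPv6 prefixes keep newbits 2):

locals {
  vnet_v4 = "172.16.0.0/16" # assumed VNet address space

  subnets_v4 = {
    controlplane = cidrsubnet(local.vnet_v4, 4, 0) # 172.16.0.0/20
    shared       = cidrsubnet(local.vnet_v4, 4, 1) # 172.16.16.0/20
    public       = cidrsubnet(local.vnet_v4, 2, 2) # 172.16.128.0/18
    private      = cidrsubnet(local.vnet_v4, 2, 3) # 172.16.192.0/18
  }
}

output "subnets_v4" {
  value = local.subnets_v4
}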
@@ -88,11 +98,40 @@ resource "azurerm_route_table" "main" {
next_hop_in_ip_address = azurerm_network_interface.router[each.key].private_ip_addresses[route.value]
}
}
+ tags = merge(var.tags, { type = "infra" })
+ }
+ resource "azurerm_route_table" "controlplane" {
+ for_each = { for idx, name in var.regions : name => idx }
+ location = each.key
+ name = "controlplane-${each.key}"
+ resource_group_name = var.resource_group
dynamic "route" {
- for_each = try(var.capabilities[each.key].network_gw_enable, false) ? [for ip in azurerm_network_interface.router[each.key].private_ip_addresses : ip if length(split(".", ip)) == 1] : []
+ for_each = [for cidr in azurerm_virtual_network.main[each.key].address_space : cidr if length(split(".", cidr)) == 1]
content {
name = "main-${each.key}-default-v6"
name = "controlplane-${each.key}-local-v6"
+ address_prefix = route.value
+ next_hop_type = "VnetLocal"
+ }
+ }
+ dynamic "route" {
+ for_each = try(var.capabilities[each.key].network_gw_enable, false) ? range(0, length(var.network_cidr)) : []
+ content {
+ name = "controlplane-${each.key}-route-v${length(split(".", var.network_cidr[route.value])) > 1 ? "4" : "6"}"
+ address_prefix = var.network_cidr[route.value]
+ next_hop_type = "VirtualAppliance"
+ next_hop_in_ip_address = azurerm_network_interface.router[each.key].private_ip_addresses[route.value]
+ }
+ }
+ dynamic "route" {
+ for_each = try(var.capabilities[each.key].network_gw_enable, false) && try(var.capabilities[each.key].network_lb_sku, "Basic") == "Basic" ? [for ip in azurerm_network_interface.router[each.key].private_ip_addresses : ip if length(split(".", ip)) == 1] : []
+ content {
+ name = "controlplane-${each.key}-default-v6"
address_prefix = "::/0"
next_hop_type = "VirtualAppliance"
next_hop_in_ip_address = route.value
@@ -102,10 +141,11 @@ resource "azurerm_route_table" "main" {
tags = merge(var.tags, { type = "infra" })
}
resource "azurerm_subnet_route_table_association" "controlplane" {
for_each = { for idx, name in var.regions : name => idx }
subnet_id = azurerm_subnet.controlplane[each.key].id
- route_table_id = azurerm_route_table.main[each.key].id
+ route_table_id = azurerm_route_table.controlplane[each.key].id
}
resource "azurerm_subnet_route_table_association" "public" {

View File

@@ -41,7 +41,7 @@ output "network_public" {
value = { for zone, subnet in azurerm_subnet.public : zone => {
network_id = subnet.id
cidr = subnet.address_prefixes
- sku = azurerm_lb.controlplane[zone].sku
+ sku = var.capabilities[zone].network_gw_sku
} }
}
@@ -51,6 +51,7 @@ output "network_private" {
network_id = subnet.id
cidr = subnet.address_prefixes
nat = try(azurerm_public_ip.nat[zone].ip_address, "")
+ sku = try(azurerm_public_ip.nat[zone].ip_address, "") == "" ? "Standard" : var.capabilities[zone].network_gw_sku
} }
}

View File

@@ -35,7 +35,7 @@ resource "azurerm_network_security_group" "common" {
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
- source_address_prefix = security_rule.value
+ source_address_prefix = length(split(".", security_rule.value)) > 1 ? security_rule.value : "::/0"
destination_port_ranges = ["4240"]
destination_address_prefix = security_rule.value
}
@@ -63,7 +63,7 @@ resource "azurerm_network_security_group" "common" {
access = "Allow"
protocol = "Icmp"
source_port_range = "*"
- source_address_prefix = security_rule.value
+ source_address_prefix = length(split(".", security_rule.value)) > 1 ? security_rule.value : "::/0"
destination_port_range = "*"
destination_address_prefix = security_rule.value
}
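Note: the source_address_prefix changes above use length(split(".", value)) > 1 as an address-family test: an IPv4 CIDR contains dots, an IPv6 one does not, so the IPv6 rules now accept any v6 source (::/0) while the IPv4 rules keep the subnet prefix. A small self-contained sketch of that idiom (the prefixes are hypothetical, not from this repository):

locals {
  prefixes = ["10.0.0.0/16", "fd00:10::/64"]

  source_prefixes = {
    for p in local.prefixes :
    p => length(split(".", p)) > 1 ? p : "::/0" # IPv4 kept as-is, IPv6 widened to ::/0
  }
}

output "source_prefixes" {
  value = local.source_prefixes # { "10.0.0.0/16" = "10.0.0.0/16", "fd00:10::/64" = "::/0" }
}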

View File

@@ -44,7 +44,7 @@ resource "azurerm_network_security_group" "controlplane" {
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
- source_address_prefix = security_rule.value
+ source_address_prefix = length(split(".", security_rule.value)) > 1 ? security_rule.value : "::/0"
destination_port_ranges = ["4240"]
destination_address_prefix = security_rule.value
}
@@ -72,7 +72,7 @@ resource "azurerm_network_security_group" "controlplane" {
access = "Allow"
protocol = "Icmp"
source_port_range = "*"
- source_address_prefix = security_rule.value
+ source_address_prefix = length(split(".", security_rule.value)) > 1 ? security_rule.value : "::/0"
destination_port_range = "*"
destination_address_prefix = security_rule.value
}

View File

@@ -62,5 +62,20 @@ resource "azurerm_network_security_group" "router" {
}
}
dynamic "security_rule" {
for_each = var.network_cidr
content {
name = "Nat-${security_rule.key}"
priority = 1800 + security_rule.key
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
source_address_prefix = security_rule.value
destination_port_range = "*"
destination_address_prefix = "*"
}
}
tags = merge(var.tags, { type = "infra" })
}

View File

@@ -44,7 +44,7 @@ resource "azurerm_network_security_group" "web" {
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
- source_address_prefix = security_rule.value
+ source_address_prefix = length(split(".", security_rule.value)) > 1 ? security_rule.value : "::/0"
destination_port_ranges = ["4240"]
destination_address_prefix = security_rule.value
}
@@ -72,7 +72,7 @@ resource "azurerm_network_security_group" "web" {
access = "Allow"
protocol = "Icmp"
source_port_range = "*"
- source_address_prefix = security_rule.value
+ source_address_prefix = length(split(".", security_rule.value)) > 1 ? security_rule.value : "::/0"
destination_port_range = "*"
destination_address_prefix = security_rule.value
}

View File

@@ -72,15 +72,17 @@ variable "capabilities" {
},
"uksouth" = {
network_nat_enable = false,
network_lb_type = "Basic", # Standard
network_lb_sku = "Basic", # Standard
network_gw_enable = false,
network_gw_type = "Standard_B1s",
network_gw_sku = "Basic", # Standard
},
"ukwest" = {
network_nat_enable = false,
network_lb_type = "Basic",
network_lb_sku = "Basic",
network_gw_enable = false,
network_gw_type = "Standard_B1s",
network_gw_sku = "Basic", # Standard
},
}
}
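Note: network_lb_type is renamed to network_lb_sku and a separate network_gw_sku is introduced for the gateway public IPs, while network_gw_type (the Standard_B1s instance size) stays in place. The modules elsewhere in this diff read these keys with try(..., "Basic"), so regions that omit a key fall back to the Basic SKU. An equivalent fallback expressed with lookup(), for illustration only (region names and values are hypothetical):

locals {
  capabilities = {
    uksouth = { network_lb_sku = "Standard" }
    ukwest  = {} # key absent -> fall back to "Basic"
  }

  lb_sku = {
    for name, caps in local.capabilities :
    name => lookup(caps, "network_lb_sku", "Basic")
  }
}

output "lb_sku" {
  value = local.lb_sku # { uksouth = "Standard", ukwest = "Basic" }
}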

View File

@@ -55,7 +55,7 @@ create-kubeconfig: ## Prepare kubeconfig
kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
create-deployments:
- helm template --namespace=kube-system --version=1.12.4 -f deployments/cilium.yaml cilium \
+ helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.4.0 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml

View File

@@ -34,6 +34,7 @@ data:
identity-allocation-mode: crd
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
+ skip-cnp-status-startup-clean: "false"
# Disable the usage of CiliumEndpoint CRD
disable-endpoint-crd: "false"
@@ -550,7 +551,7 @@ spec:
spec:
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -685,7 +686,7 @@ spec:
mountPath: /run/xtables.lock
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -811,13 +812,15 @@ spec:
template:
metadata:
annotations:
+ # ensure pods roll when configmap updates
+ cilium.io/cilium-configmap-checksum: "93ed3047796c548140dd014145d2cb313155de38c36595eb2f05f60856400ae5"
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.12.4@sha256:071089ec5bca1f556afb8e541d9972a0dfb09d1e25504ae642ced021ecbedbd1"
image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic

View File

@@ -38,7 +38,7 @@ create-controlplane-bootstrap:
talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} bootstrap
create-deployments:
- helm template --namespace=kube-system --version=1.12.4 -f deployments/cilium.yaml cilium \
+ helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.4.0 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml

View File

@@ -34,6 +34,7 @@ data:
identity-allocation-mode: crd
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
+ skip-cnp-status-startup-clean: "false"
# Disable the usage of CiliumEndpoint CRD
disable-endpoint-crd: "false"
@@ -550,7 +551,7 @@ spec:
spec:
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -685,7 +686,7 @@ spec:
mountPath: /run/xtables.lock
initContainers:
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.12.4@sha256:4b074fcfba9325c18e97569ed1988464309a5ebf64bbc79bec6f3d58cafcb8cf"
image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -812,14 +813,14 @@ spec:
metadata:
annotations:
# ensure pods roll when configmap updates
cilium.io/cilium-configmap-checksum: "c3ffdb3de5df1007b50c84e0af5ba77bc44d069f56d62d3232573a21084f2f80"
cilium.io/cilium-configmap-checksum: "93ed3047796c548140dd014145d2cb313155de38c36595eb2f05f60856400ae5"
labels:
io.cilium/app: operator
name: cilium-operator
spec:
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.12.4@sha256:071089ec5bca1f556afb8e541d9972a0dfb09d1e25504ae642ced021ecbedbd1"
image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic