ipv6 on pods

Serge Logvinov
2024-08-16 11:33:31 +03:00
parent a0fb5ac4fd
commit b037692727
8 changed files with 172 additions and 18 deletions

View File

@@ -20,20 +20,20 @@ Having a single Kubernetes control plane that spans multiple cloud providers can
 ## Clouds
 
-| Platform | Checked Talos version | Addons | Setup type | Nat-IPv4 | IPv6 |
-|---|---|---|---|---|---|
-| [Azure](azure) | 1.3.4 | CCM,CSI,Autoscaler | many regions, many zones | ✓ | ✓ |
-| [Exoscale](exoscale) | 1.3.0 | CCM,Autoscaler | many regions | ✗ | |
-| [GCP](gcp-zonal) | 1.3.4 | CCM,CSI,Autoscaler | one region, many zones | ✓ | ✓ |
-| [Hetzner](hetzner) | 1.8.0 | CCM,CSI,Autoscaler | many regions, one network zone | ✗ | ✓ |
-| [Openstack](openstack) | 1.3.4 | CCM,CSI | many regions, many zones | ✓ | ✓ |
+| Platform | Checked Talos version | Addons | Setup type | Nat-IPv4 | IPv6 | Pod with IPv6 |
+|---|---|---|---|---|---|---|
+| [Azure](azure) | 1.3.4 | CCM,CSI,Autoscaler | many regions, many zones | ✓ | ✓ | |
+| [Exoscale](exoscale) | 1.3.0 | CCM,Autoscaler | many regions | ✗ | | |
+| [GCP](gcp-zonal) | 1.3.4 | CCM,CSI,Autoscaler | one region, many zones | ✓ | ✓ | ✓ |
+| [Hetzner](hetzner) | 1.7.6 | CCM,CSI,Autoscaler | many regions, one network zone | ✗ | ✓ | ✓ |
+| [Openstack](openstack) | 1.3.4 | CCM,CSI | many regions, many zones | ✓ | ✓ | ✓ |
 | [Oracle](oracle) | 1.3.4 | CCM,CSI,Autoscaler | one region, many zones | ✓ | ✓ |
-| [Proxmox](proxmox) | 1.8.0 | CCM,CSI | one region, many zones | ✓ | ✓ |
-| [Scaleway](scaleway) | 1.8.0 | CCM,CSI | one region | ✓ | ✓ |
+| [Proxmox](proxmox) | 1.7.6 | CCM,CSI | one region, many zones | ✓ | ✓ | ✓ |
+| [Scaleway](scaleway) | 1.7.6 | CCM,CSI | one region | ✓ | ✓ | |
 
 ## Known issues
 
-* Talos does not support Oracle CSI.
+* Talos does not support the upstream Oracle CSI; use my [fork](https://github.com/sergelogvinov/oci-cloud-controller-manager) instead.
 
 ## Multi cloud compatibility

View File

@@ -33,6 +33,16 @@ Where:
 * master-X - talos control plane nodes
 * store-X - debian bare metal worker servers
 
+Pod with IPv6:
+
+```sh
+# kubectl exec -ti cilium-dddgc -- bash
+root@controlplane-fsn1-1:/home/cilium# cilium node list
+Name                  IPv4 Address   Endpoint CIDR   IPv6 Address           Endpoint CIDR            Source
+controlplane-fsn1-1   172.16.0.12    10.32.0.0/24    2a01:4f8:c17:9967::1   2a01:4f8:c17:9967::/80   local
+web-fsn1-1            172.16.0.50    10.32.2.0/24    2a01:4f8:c012:5795::1  2a01:4f8:c012:5795::/80   custom-resource
+```
+
 ## Prepare the base image
 
 Use packer (system_os/hetzner) to upload the image.
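
To make the packer step concrete, here is a minimal sketch of building and uploading the image. The `HCLOUD_TOKEN` variable is the standard token input of the packer hcloud builder; whether `packer init` is needed depends on how the template in system_os/hetzner declares its plugins:

```sh
# token for the packer hcloud builder
export HCLOUD_TOKEN="<your-hetzner-api-token>"

cd system_os/hetzner
packer init .    # fetch the hcloud plugin if the template declares it
packer build .   # build the Talos image and upload it as a snapshot
```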

View File

@@ -0,0 +1,80 @@
+---
+k8sServiceHost: "api.cluster.local"
+k8sServicePort: "6443"
+
+operator:
+  enabled: true
+  rollOutPods: true
+  replicas: 1
+  prometheus:
+    enabled: false
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+  tolerations:
+    - operator: Exists
+      effect: NoSchedule
+
+identityAllocationMode: crd
+kubeProxyReplacement: strict
+enableK8sEndpointSlice: true
+localRedirectPolicy: true
+
+tunnel: "vxlan"
+autoDirectNodeRoutes: false
+devices: [eth+]
+
+healthChecking: true
+
+cni:
+  install: true
+
+ipam:
+  mode: "kubernetes"
+k8s:
+  requireIPv4PodCIDR: true
+  requireIPv6PodCIDR: true
+
+enableIPv6Masquerade: false
+enableIPv4Masquerade: true
+bpf:
+  masquerade: true
+
+ipv4:
+  enabled: true
+ipv6:
+  enabled: true
+
+hostServices:
+  enabled: true
+hostPort:
+  enabled: true
+nodePort:
+  enabled: true
+externalIPs:
+  enabled: true
+hostFirewall:
+  enabled: true
+ingressController:
+  enabled: false
+
+securityContext:
+  privileged: true
+
+hubble:
+  enabled: false
+prometheus:
+  enabled: true
+
+cgroup:
+  autoMount:
+    enabled: false
+  hostRoot: /sys/fs/cgroup
+
+resources:
+  limits:
+    cpu: 2
+    memory: 2Gi
+  requests:
+    cpu: 100m
+    memory: 128Mi
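
These values target the Cilium Helm chart. A sketch of applying them, where the values file name is an assumption and the chart version should be pinned to whatever release you actually test:

```sh
helm repo add cilium https://helm.cilium.io/
helm repo update

# file name is an assumption; add --version to pin the chart release
helm upgrade --install cilium cilium/cilium \
  --namespace kube-system \
  -f cilium-values.yaml
```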

View File

@@ -14,13 +14,13 @@ logVerbosityLevel: 4
 
 enabledControllers:
   - cloud-node
-  # - node-ipam-controller
+  - node-ipam-controller
 
-# extraArgs:
-#   - --allocate-node-cidrs
-#   - --cidr-allocator-type=CloudAllocator
-#   - --node-cidr-mask-size-ipv4=24
-#   - --node-cidr-mask-size-ipv6=80
+extraArgs:
+  - --allocate-node-cidrs
+  - --cidr-allocator-type=CloudAllocator
+  - --node-cidr-mask-size-ipv4=24
+  - --node-cidr-mask-size-ipv6=80
 
 tolerations:
   - effect: NoSchedule
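
With node-ipam-controller enabled and the CloudAllocator flags above, every node should receive both an IPv4 /24 and an IPv6 /80 pod CIDR. A quick way to verify, using nothing but stock kubectl:

```sh
# list each node with its dual-stack pod CIDRs
kubectl get nodes -o custom-columns=NAME:.metadata.name,PODCIDRS:.spec.podCIDRs
```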

View File

@@ -0,0 +1,61 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: test
+  namespace: kube-system
+  labels:
+    app: alpine
+spec:
+  podManagementPolicy: Parallel # default is OrderedReady
+  serviceName: test
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: alpine
+    spec:
+      terminationGracePeriodSeconds: 3
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+      # nodeSelector:
+      #   node-pool: web
+      #   kubernetes.io/hostname: kube-21
+      #   topology.kubernetes.io/zone: hvm-1
+      # affinity:
+      #   nodeAffinity:
+      #     preferredDuringSchedulingIgnoredDuringExecution:
+      #       - preference:
+      #           matchExpressions:
+      #             - key: kubernetes.io/hostname
+      #               operator: In
+      #               values:
+      #                 - controlplane-41
+      #         weight: 100
+      containers:
+        - name: alpine
+          image: alpine
+          command: ["sleep","1d"]
+          securityContext:
+            privileged: true
+            seccompProfile:
+              type: RuntimeDefault
+            # capabilities:
+            #   drop: ["ALL"]
+          # volumeMounts:
+          #   - name: storage
+          #     mountPath: /mnt
+  updateStrategy:
+    type: RollingUpdate
+  selector:
+    matchLabels:
+      app: alpine
+  # volumeClaimTemplates:
+  #   - metadata:
+  #       name: storage
+  #     spec:
+  #       accessModes: ["ReadWriteOnce"]
+  #       resources:
+  #         requests:
+  #           storage: 5Gi
+  #       storageClassName: proxmox
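
Once the StatefulSet is up, the pod should carry both address families. A hedged check — the `test-0` name follows the standard StatefulSet naming pattern, and `ip` is the busybox applet shipped in the alpine image:

```sh
# expect one IPv4 and one IPv6 entry in the pod status
kubectl -n kube-system get pod test-0 -o jsonpath='{.status.podIPs}'

# inspect the IPv6 address on the pod interface
kubectl -n kube-system exec test-0 -- ip -6 addr show dev eth0
```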

View File

@@ -34,6 +34,7 @@ resource "hcloud_server" "controlplane" {
   network {
     network_id = hcloud_network.main.id
     ip         = each.value.ip
+    alias_ips  = each.key == keys(local.controlplanes)[0] ? [local.ipv4_vip] : []
   }
 
   lifecycle {
@@ -48,7 +49,7 @@ resource "hcloud_server" "controlplane" {
 }
 
 resource "hcloud_load_balancer_target" "api" {
-  count            = local.lb_enable ? lookup(var.controlplane, "count", 0) : 0
+  count            = local.lb_enable ? length(local.controlplanes) : 0
   type             = "server"
   load_balancer_id = hcloud_load_balancer.api[0].id
   server_id        = hcloud_server.controlplane[count.index].id
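
The new `alias_ips` expression pins the shared VIP to the first control-plane node only, so the address is never claimed by two servers at once. A rollout sketch with the stock terraform and hcloud CLIs; the server name here is taken from the node list above and may differ in your deployment:

```sh
terraform plan    # review the alias_ips and load balancer target changes
terraform apply

# optional: confirm the VIP now sits on the first control-plane server
hcloud server describe controlplane-fsn1-1
```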

View File

@@ -93,8 +93,9 @@ cluster:
   controllerManager:
     image: registry.k8s.io/kube-controller-manager:${version}
     extraArgs:
+      controllers: "*,tokencleaner,-node-ipam-controller"
       node-cidr-mask-size-ipv4: "24"
-      node-cidr-mask-size-ipv6: "112"
+      node-cidr-mask-size-ipv6: "80"
   scheduler:
     image: registry.k8s.io/kube-scheduler:${version}
   etcd:
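
After regenerating the machine config with these controller-manager flags (which disable the in-tree node-ipam-controller and hand CIDR allocation to the cloud controller), the config can be pushed to running control planes. A sketch with the stock talosctl CLI, where the node address is a placeholder:

```sh
# re-apply the updated config to each control-plane node
talosctl --talosconfig _cfgs/talosconfig \
  apply-config --nodes <controlplane-ip> --file _cfgs/controlplane.yaml
```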

View File

@@ -40,6 +40,7 @@ create-templates:
 	@sops --encrypt -i terraform.tfvars.sops.json
 	@yq eval .ca _cfgs/tfstate.vars | base64 --decode > _cfgs/ca.crt
 	@sops --encrypt --input-type=yaml --output-type=yaml _cfgs/talosconfig > _cfgs/talosconfig.sops.yaml
+	@sops --encrypt --input-type=yaml --output-type=yaml _cfgs/controlplane.yaml > _cfgs/controlplane.sops.yaml
 	@git add -f _cfgs/talosconfig.sops.yaml _cfgs/ca.crt terraform.tfvars.sops.json
 
 bootstrap: ## Bootstrap controlplane
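
The added line keeps an encrypted copy of controlplane.yaml in the repository next to the talosconfig; the same sops flags run in reverse when the plain file is needed again:

```sh
# decrypt the machine config back to plain YAML before re-applying it
sops --decrypt --input-type=yaml --output-type=yaml \
  _cfgs/controlplane.sops.yaml > _cfgs/controlplane.yaml
```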