Remove support for Calico CNI

* Cilium has been the default for about 3 years and is the de facto
standard CNI choice. flannel remains supported as a simple alternative
* Remove various historical options that were specific to Calico
Dalton Hubble
2024-12-27 20:38:36 -08:00
parent a8eae32b53
commit 8059eb9f0c
72 changed files with 88 additions and 204 deletions
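For module users, the upshot is that `networking` now accepts only `cilium` or `flannel`, and the Calico-specific inputs must be dropped from cluster configs. A minimal before/after sketch (module name and values are illustrative, not taken from this commit):

```tf
module "cluster" {
  # ...
  # networking = "calico"  # no longer accepted after this change
  networking = "cilium"    # or "flannel"

  # Delete Calico-specific inputs, removed from the modules:
  # network_mtu                     = 8981
  # network_ip_autodetection_method = "first-found"
}
```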

View File

@@ -9,6 +9,9 @@ Notable changes between versions.
* Kubernetes [v1.32.0](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.32.md#v1320)
* Change the default Pod CIDR from 10.2.0.0/16 to 10.20.0.0/14 ([#1555](https://github.com/poseidon/typhoon/pull/1555))
* Configure Kubelets for parallel image pulls ([#1556](https://github.com/poseidon/typhoon/pull/1556))
* Remove support for Calico CNI (set `networking` to `cilium` or `flannel`) ([#1558](https://github.com/poseidon/typhoon/pull/1558))
* Remove `network_mtu`, `network_encapsulation`, and `network_ip_autodetection_method` variables (Calico-specific)
* Remove Calico-specific Kubelet mounts
# v1.31.4

View File

@@ -19,7 +19,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/flatcar-linux/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, or other [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/fedora-coreos/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,12 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = aws_route53_record.etcds.*.fqdn
networking = var.networking
network_mtu = var.network_mtu
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
daemonset_tolerations = var.daemonset_tolerations

View File

@@ -62,7 +62,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -80,7 +79,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -133,16 +133,10 @@ variable "ssh_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
default = 1480
}
variable "host_cidr" {
type = string
description = "CIDR IPv4 range to assign to EC2 nodes"

View File

@@ -34,7 +34,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -52,7 +51,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot](https://typhoon.psdn.io/flatcar-linux/aws/#spot) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,12 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = aws_route53_record.etcds.*.fqdn
networking = var.networking
network_mtu = var.network_mtu
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
daemonset_tolerations = var.daemonset_tolerations

View File

@@ -63,7 +63,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=/usr/bin/docker run -d \
@@ -78,7 +77,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -133,16 +133,10 @@ variable "ssh_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only). Use 8981 if using instances types with Jumbo frames."
default = 1480
}
variable "host_cidr" {
type = string
description = "CIDR IPv4 range to assign to EC2 nodes"

View File

@@ -35,7 +35,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
# Podman, rkt, or runc run container processes, whereas docker run
@@ -53,7 +52,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [spot priority](https://typhoon.psdn.io/fedora-coreos/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,17 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)
networking = var.networking
# only effective with Calico networking
# we should be able to use 1450 MTU, but in practice, 1410 was needed
network_encapsulation = "vxlan"
network_mtu = "1410"
networking = var.networking
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
daemonset_tolerations = var.daemonset_tolerations

View File

@@ -58,7 +58,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -76,7 +75,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -114,7 +114,7 @@ variable "azure_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}

View File

@@ -30,7 +30,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -48,7 +47,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [low-priority](https://typhoon.psdn.io/flatcar-linux/azure/#low-priority) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,17 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)
networking = var.networking
# only effective with Calico networking
# we should be able to use 1450 MTU, but in practice, 1410 was needed
network_encapsulation = "vxlan"
network_mtu = "1410"
networking = var.networking
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
daemonset_tolerations = var.daemonset_tolerations

View File

@@ -60,7 +60,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=/usr/bin/docker run -d \
@@ -75,7 +74,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -120,7 +120,7 @@ variable "azure_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}

View File

@@ -32,7 +32,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
# Podman, rkt, or runc run container processes, whereas docker run
@@ -50,7 +49,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,16 +1,14 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [var.k8s_domain_name]
etcd_servers = var.controllers.*.domain
networking = var.networking
network_mtu = var.network_mtu
network_ip_autodetection_method = var.network_ip_autodetection_method
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
components = var.components
cluster_name = var.cluster_name
api_servers = [var.k8s_domain_name]
etcd_servers = var.controllers.*.domain
networking = var.networking
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
components = var.components
}

View File

@@ -57,7 +57,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -75,7 +74,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -88,22 +88,10 @@ variable "ssh_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only)"
default = 1480
}
variable "network_ip_autodetection_method" {
type = string
description = "Method to autodetect the host IPv4 address (applies to calico only)"
default = "first-found"
}
variable "pod_cidr" {
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"

View File

@@ -29,7 +29,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -47,7 +46,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,15 +1,13 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [var.k8s_domain_name]
etcd_servers = var.controllers.*.domain
networking = var.networking
network_mtu = var.network_mtu
network_ip_autodetection_method = var.network_ip_autodetection_method
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
components = var.components
cluster_name = var.cluster_name
api_servers = [var.k8s_domain_name]
etcd_servers = var.controllers.*.domain
networking = var.networking
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
components = var.components
}

View File

@@ -68,7 +68,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=/usr/bin/docker run -d \
@@ -83,7 +82,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -87,22 +87,10 @@ variable "ssh_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}
variable "network_mtu" {
type = number
description = "CNI interface MTU (applies to calico only)"
default = 1480
}
variable "network_ip_autodetection_method" {
type = string
description = "Method to autodetect the host IPv4 address (applies to calico only)"
default = "first-found"
}
variable "pod_cidr" {
type = string
description = "CIDR IPv4 range to assign Kubernetes pods"

View File

@@ -40,7 +40,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
# Podman, rkt, or runc run container processes, whereas docker run
@@ -58,7 +57,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, and other [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,16 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = digitalocean_record.etcds.*.fqdn
networking = var.networking
# only effective with Calico networking
network_encapsulation = "vxlan"
network_mtu = "1450"
networking = var.networking
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
components = var.components

View File

@@ -60,7 +60,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -78,7 +77,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -33,7 +33,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -51,7 +50,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -39,7 +39,7 @@ resource "digitalocean_firewall" "rules" {
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}
# IANA vxlan (flannel, calico)
# IANA vxlan (flannel)
inbound_rule {
protocol = "udp"
port_range = "4789"

View File

@@ -67,7 +67,7 @@ variable "ssh_fingerprints" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, and other [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,16 +1,12 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = digitalocean_record.etcds.*.fqdn
networking = var.networking
# only effective with Calico networking
network_encapsulation = "vxlan"
network_mtu = "1450"
networking = var.networking
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
components = var.components

View File

@@ -71,7 +71,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=/usr/bin/docker run -d \
@@ -86,7 +85,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -43,7 +43,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
# Podman, rkt, or runc run container processes, whereas docker run
@@ -61,7 +60,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -39,7 +39,7 @@ resource "digitalocean_firewall" "rules" {
source_tags = [digitalocean_tag.controllers.name, digitalocean_tag.workers.name]
}
# IANA vxlan (flannel, calico)
# IANA vxlan (flannel)
inbound_rule {
protocol = "udp"
port_range = "4789"

View File

@@ -67,7 +67,7 @@ variable "ssh_fingerprints" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}

View File

@@ -2,7 +2,7 @@
Typhoon's component model allows for managing cluster components independently of the cluster's lifecycle, upgrading in a rolling or automated fashion, or customizing components in advanced ways.
Typhoon clusters install core components like `CoreDNS`, `kube-proxy`, and a chosen CNI provider (`flannel`, `calico`, or `cilium`) by default. Since v1.30.1, pre-installed components are optional. Other "addon" components like Nginx Ingress, Prometheus, or Grafana may be optionally applied through the component model (after cluster creation).
Typhoon clusters install core components like `CoreDNS`, `kube-proxy`, and a chosen CNI provider (`flannel` or `cilium`) by default. Since v1.30.1, pre-installed components are optional. Other "addon" components like Nginx Ingress, Prometheus, or Grafana may be optionally applied through the component model (after cluster creation).
## Components
@@ -12,7 +12,6 @@ Pre-installed by default:
* kube-proxy
* CNI provider (set via `var.networking`)
* flannel
* Calico
* Cilium
Addons:
@@ -24,7 +23,7 @@ Addons:
## Pre-installed Components
By default, Typhoon clusters install `CoreDNS`, `kube-proxy`, and a chosen CNI provider (`flannel`, `calico`, or `cilium`). Disable any or all of these components using the `components` system.
By default, Typhoon clusters install `CoreDNS`, `kube-proxy`, and a chosen CNI provider (`flannel` or `cilium`). Disable any or all of these components using the `components` system.
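A hedged sketch of such a `components` block (field names are assumed from the components docs, not verified against this commit):

```tf
module "cluster" {
  # ...
  components = {
    enable     = true               # assumed master toggle for pre-installed components
    coredns    = { enable = true }
    kube_proxy = { enable = true }
    flannel    = { enable = false } # e.g. skip the CNI to manage it separately
  }
}
```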
```tf
module "yavin" {

View File

@@ -83,7 +83,7 @@ In the example above, the two default workers would be labeled `pool: default` a
Add custom initial taints on worker pool nodes to indicate a node is unique and should only schedule workloads that explicitly tolerate a given taint key.
!!! warning
Since taints prevent workloads scheduling onto a node, you must decide whether `kube-system` DaemonSets (e.g. flannel, Calico, Cilium) should tolerate your custom taint by setting `daemonset_tolerations`. If you don't list your custom taint(s), important components won't run on these nodes.
Since taints prevent workloads scheduling onto a node, you must decide whether `kube-system` DaemonSets (e.g. flannel, Cilium) should tolerate your custom taint by setting `daemonset_tolerations`. If you don't list your custom taint(s), important components won't run on these nodes.
=== "Cluster"
@@ -130,5 +130,5 @@ Add custom initial taints on worker pool nodes to indicate a node is unique and
}
```
In the example above, the additional worker would be tainted with `role=gpu:NoSchedule` to prevent workloads scheduling, but `kube-system` components like flannel, Calico, or Cilium would tolerate that custom taint to run there.
In the example above, the additional worker would be tainted with `role=gpu:NoSchedule` to prevent workloads scheduling, but `kube-system` components like flannel or Cilium would tolerate that custom taint to run there.
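A hedged sketch of the pairing described above (pool/module names and the `role` key are illustrative; `node_taints` is assumed from the worker pools docs):

```tf
module "cluster" {
  # ...
  # Allow kube-system DaemonSets (flannel or Cilium, kube-proxy) to
  # tolerate NoSchedule taints with this key.
  daemonset_tolerations = ["role"]
}

module "gpu-pool" {
  # ...
  # Only pods tolerating role=gpu:NoSchedule will schedule onto this pool.
  node_taints = ["role=gpu:NoSchedule"]
}
```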

View File

@@ -8,7 +8,7 @@ Let's cover the concepts you'll need to get started.
#### Nodes
All cluster nodes provision themselves from a declarative configuration upfront. Nodes run a `kubelet` service and register themselves with the control plane to join the cluster. All nodes run `kube-proxy` and `calico` or `flannel` pods.
All cluster nodes provision themselves from a declarative configuration upfront. Nodes run a `kubelet` service and register themselves with the control plane to join the cluster. All nodes run `kube-proxy` and `cilium` or `flannel` pods.
#### Controllers

View File

@@ -39,7 +39,7 @@ Together, they diversify Typhoon to support a range of container technologies.
| control plane images | upstream images | upstream images |
| on-host etcd | docker | podman |
| on-host kubelet | docker | podman |
| CNI plugins | calico, cilium, flannel | calico, cilium, flannel |
| CNI plugins | cilium, flannel | cilium, flannel |
| coordinated drain & OS update | [FLUO](https://github.com/kinvolk/flatcar-linux-update-operator) addon | [fleetlock](https://github.com/poseidon/fleetlock) |
## Directory Locations

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on AWS with Fedora C
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -224,8 +224,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
| controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
| worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| network_mtu | CNI interface MTU (calico only) | 1480 | 8981 |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
@@ -236,9 +235,6 @@ Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-typ
!!! warning
Do not choose a `controller_type` smaller than `t3.small`. Smaller instances are not sufficient for running a controller.
!!! tip "MTU"
If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.
#### Spot
Add `worker_price = "0.10"` to use spot instance workers (instead of "on-demand") and set a maximum spot price in USD. Clusters can tolerate spot market interruptions fairly well (reschedules pods, but cannot drain) to save money, with the tradeoff that requests for workers may go unfulfilled.
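Concretely, the networking choice is one input among the cluster declaration's others; a hedged sketch (cluster name, zone, and key are placeholders, not from the tutorial):

```tf
module "tempest" {
  source = "git::https://github.com/poseidon/typhoon//aws/fedora-coreos/kubernetes?ref=v1.32.0"

  # placeholders for illustration
  cluster_name       = "tempest"
  dns_zone           = "aws.example.com"
  dns_zone_id        = "Z3PAABBB"
  ssh_authorized_key = "ssh-ed25519 AAAA..."

  # only "cilium" or "flannel" remain; Cilium detects the interface MTU
  # itself, so the old network_mtu jumbo-frame tuning is no longer needed
  networking = "cilium"
}
```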

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on Azure with Fedora
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -252,7 +252,7 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
| worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
| controller_snippets | Controller Butane snippets | [] | [example](/advanced/customization/#usage) |
| worker_snippets | Worker Butane snippets | [] | [example](/advanced/customization/#usage) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| network_cidr | Virtual network CIDR ranges | { ipv4 = ["10.0.0.0/16"], ipv6 = [ULA, ...] } | { ipv4 = ["10.0.0.0/20"] } |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll network boot and provision a Kubernetes v1.32.0 cluster
First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and set up a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Fedora CoreOS to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -292,7 +292,6 @@ $ journalctl -f -u bootstrap
podman[1750]: The connection to the server cluster.example.com:6443 was refused - did you specify the right host or port?
podman[1750]: Waiting for static pod control plane
...
podman[1750]: serviceaccount/calico-node unchanged
systemd[1]: Started Kubernetes control plane.
```
@@ -365,10 +364,8 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
| workers | List of worker machine detail objects (unique name, identifying MAC address, FQDN) | [] | `[{name="node2", mac="52:54:00:b2:2f:86", domain="node2.example.com"}, {name="node3", mac="52:54:00:c3:61:77", domain="node3.example.com"}]` |
| cached_install | PXE boot and install from the Matchbox `/assets` cache. Admin MUST have downloaded Fedora CoreOS images into the cache | false | true |
| install_disk | Disk device where Fedora CoreOS should be installed | "sda" (not "/dev/sda" like Container Linux) | "sdb" |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| network_mtu | CNI interface MTU (calico-only) | 1480 | - |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| snippets | Map from machine names to lists of Butane snippets | {} | [examples](/advanced/customization/) |
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on DigitalOcean with
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -241,7 +241,7 @@ Digital Ocean requires the SSH public key be uploaded to your account, so you ma
| worker_type | Droplet type for workers | "s-1vcpu-2gb" | s-1vcpu-2gb, s-2vcpu-2gb, ... |
| controller_snippets | Controller Butane snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Butane snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on Google Compute En
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -224,7 +224,7 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Butane snippets | [] | [examples](/advanced/customization/) |
| worker_snippets | Worker Butane snippets | [] | [examples](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on AWS with Flatcar
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a VPC, gateway, subnets, security groups, controller instances, worker auto-scaling group, network load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -222,8 +222,7 @@ Reference the DNS zone id with `aws_route53_zone.zone-for-clusters.zone_id`.
| worker_target_groups | Target group ARNs to which worker instances should be added | [] | [aws_lb_target_group.app.id] |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| network_mtu | CNI interface MTU (calico only) | 1480 | 8981 |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| host_cidr | CIDR IPv4 range to assign to EC2 instances | "10.0.0.0/16" | "10.1.0.0/16" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
@@ -234,9 +233,6 @@ Check the list of valid [instance types](https://aws.amazon.com/ec2/instance-typ
!!! warning
Do not choose a `controller_type` smaller than `t3.small`. Smaller instances are not sufficient for running a controller.
!!! tip "MTU"
If your EC2 instance type supports [Jumbo frames](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) (most do), we recommend you change the `network_mtu` to 8981! You will get better pod-to-pod bandwidth.
#### Spot
Add `worker_price = "0.10"` to use spot instance workers (instead of "on-demand") and set a maximum spot price in USD. Clusters can tolerate spot market interruptions fairly well (reschedules pods, but cannot drain) to save money, with the tradeoff that requests for workers may go unfulfilled.

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on Azure with Flatca
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a resource group, virtual network, subnets, security groups, controller availability set, worker scale set, load balancer, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -240,7 +240,7 @@ Reference the DNS zone with `azurerm_dns_zone.clusters.name` and its resource gr
| worker_priority | Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time | Regular | Spot |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/#usage) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| network_cidr | Virtual network CIDR ranges | { ipv4 = ["10.0.0.0/16"], ipv6 = [ULA, ...] } | { ipv4 = ["10.0.0.0/20"] } |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll network boot and provision a Kubernetes v1.32.0 cluster
First, we'll deploy a [Matchbox](https://github.com/poseidon/matchbox) service and set up a network boot environment. Then, we'll declare a Kubernetes cluster using the Typhoon Terraform module and power on machines. On PXE boot, machines will install Container Linux to disk, reboot into the disk install, and provision themselves as Kubernetes controllers or workers via Ignition.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns` while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -302,7 +302,6 @@ $ journalctl -f -u bootstrap
The connection to the server cluster.example.com:6443 was refused - did you specify the right host or port?
Waiting for static pod control plane
...
serviceaccount/calico-node unchanged
systemd[1]: Started Kubernetes control plane.
```
@@ -376,10 +375,8 @@ Check the [variables.tf](https://github.com/poseidon/typhoon/blob/master/bare-me
| download_protocol | Protocol iPXE uses to download the kernel and initrd. iPXE must be compiled with [crypto](https://ipxe.org/crypto) support for https. Unused if cached_install is true | "https" | "http" |
| cached_install | PXE boot and install from the Matchbox `/assets` cache. Admin MUST have downloaded Container Linux or Flatcar images into the cache | false | true |
| install_disk | Disk device where Container Linux should be installed | "/dev/sda" | "/dev/sdb" |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| network_mtu | CNI interface MTU (calico-only) | 1480 | - |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| snippets | Map from machine names to lists of Container Linux Config snippets | {} | [examples](/advanced/customization/) |
| network_ip_autodetection_method | Method to detect host IPv4 address (calico-only) | "first-found" | "can-reach=10.0.0.1" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| kernel_args | Additional kernel args to provide at PXE boot | [] | ["kvm-intel.nested=1"] |

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on DigitalOcean with
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create controller droplets, worker droplets, DNS records, tags, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -241,7 +241,7 @@ Digital Ocean requires the SSH public key be uploaded to your account, so you ma
| worker_type | Droplet type for workers | "s-1vcpu-2gb" | s-1vcpu-2gb, s-2vcpu-2gb, ... |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |

View File

@@ -4,7 +4,7 @@ In this tutorial, we'll create a Kubernetes v1.32.0 cluster on Google Compute En
We'll declare a Kubernetes cluster using the Typhoon Terraform module. Then apply the changes to create a network, firewall rules, health checks, controller instances, worker managed instance group, load balancers, and TLS assets.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel`, `calico`, or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
Controller hosts are provisioned to run an `etcd-member` peer and a `kubelet` service. Worker hosts run a `kubelet` service. Controller nodes run `kube-apiserver`, `kube-scheduler`, `kube-controller-manager`, and `coredns`, while `kube-proxy` and (`flannel` or `cilium`) run on every node. A generated `kubeconfig` provides `kubectl` access to the cluster.
## Requirements
@@ -222,7 +222,7 @@ resource "google_dns_managed_zone" "zone-for-clusters" {
| worker_preemptible | If enabled, Compute Engine will terminate workers randomly within 24 hours | false | true |
| controller_snippets | Controller Container Linux Config snippets | [] | [example](/advanced/customization/) |
| worker_snippets | Worker Container Linux Config snippets | [] | [example](/advanced/customization/) |
| networking | Choice of networking provider | "cilium" | "calico" or "cilium" or "flannel" |
| networking | Choice of networking provider | "cilium" | "cilium" or "flannel" |
| pod_cidr | CIDR IPv4 range to assign to Kubernetes pods | "10.20.0.0/14" | "10.22.0.0/16" |
| service_cidr | CIDR IPv4 range to assign to Kubernetes services | "10.3.0.0/16" | "10.3.0.0/24" |
| worker_node_labels | List of initial worker node labels | [] | ["worker-pool=default"] |
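The worker tuning variables follow the same pattern. A hedged example (module name hypothetical, other required variables omitted):

```
module "yavin" {
  source = "git::https://github.com/poseidon/typhoon//google-cloud/fedora-coreos/kubernetes?ref=v1.32.0"

  worker_preemptible = true                      # Compute Engine may reclaim within 24h
  worker_node_labels = ["worker-pool=default"]   # label the initial worker pool
}
```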

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](advanced/worker-pools/), [preemptible](fedora-coreos/google-cloud/#preemption) workers, and [snippets](advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, or other [addons](addons/overview/)
@@ -119,9 +119,9 @@ List the pods.
```
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-node-1cs8z 2/2 Running 0 6m
kube-system calico-node-d1l5b 2/2 Running 0 6m
kube-system calico-node-sp9ps 2/2 Running 0 6m
kube-system cilium-1cs8z 2/2 Running 0 6m
kube-system cilium-d1l5b 2/2 Running 0 6m
kube-system cilium-sp9ps 2/2 Running 0 6m
kube-system coredns-1187388186-dkh3o 1/1 Running 0 6m
kube-system coredns-1187388186-zj5dl 1/1 Running 0 6m
kube-system kube-apiserver-controller-0 1/1 Running 0 6m

View File

@@ -128,7 +128,7 @@ Apply complete! Resources: 0 added, 0 changed, 55 destroyed.
#### In-place Edits
Typhoon uses a static pod Kubernetes control plane which allows certain manifest upgrades to be performed in-place. Components like `kube-apiserver`, `kube-controller-manager`, and `kube-scheduler` are run as static pods. Components `flannel`/`calico`, `coredns`, and `kube-proxy` are scheduled on Kubernetes and can be edited via `kubectl`.
Typhoon uses a static pod Kubernetes control plane which allows certain manifest upgrades to be performed in-place. Components like `kube-apiserver`, `kube-controller-manager`, and `kube-scheduler` are run as static pods. Components `flannel`/`cilium`, `coredns`, and `kube-proxy` are scheduled on Kubernetes and can be edited via `kubectl`.
In certain scenarios, in-place edits can be useful for quickly rolling out security patches (e.g. bumping `coredns`) or prioritizing speed over the safety of a proper cluster re-provision and transition.

View File

@@ -38,7 +38,7 @@ Network performance varies based on the platform and CNI plugin. `iperf` was use
Notes:
* Calico, Cilium, and Flannel have comparable performance. Platform and configuration differences dominate.
* Cilium and Flannel have comparable performance. Platform and configuration differences dominate.
* Azure and DigitalOcean network performance can be quite variable or depend on machine type
* Only [certain AWS EC2 instance types](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/network_mtu.html#jumbo_frame_instances) allow jumbo frames. This is why the default MTU on AWS must be 1480.

View File

@@ -12,9 +12,9 @@ Typhoon aims to be minimal and secure. We're running it ourselves after all.
* [NodeRestriction](https://kubernetes.io/docs/reference/access-authn-authz/node/) is enabled to limit Kubelet authorization
* [Role-Based Access Control](https://kubernetes.io/docs/admin/authorization/rbac/) is enabled. Apps must define RBAC policies for API access
* Workloads run on worker nodes only, unless they tolerate the master taint
* Kubernetes [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) and Calico [NetworkPolicy](https://docs.projectcalico.org/latest/reference/calicoctl/resources/networkpolicy) support [^1]
* Kubernetes [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) and Cilium [NetworkPolicy](https://docs.cilium.io/en/latest/security/policy/index.html) support [^1]
[^1]: Requires `networking = "calico"`. Calico is the default on all platforms (AWS, Azure, bare-metal, DigitalOcean, and Google Cloud).
[^1]: Requires `networking = "cilium"`. Cilium is the default on all platforms (AWS, Azure, bare-metal, DigitalOcean, and Google Cloud).
**Hosts**
@@ -91,7 +91,6 @@ Typhoon publishes Terraform providers to the Terraform Registry, GPG signed by 0
| coredns | NA | false | false |
| kube-proxy | root | true | true |
| cilium | root | true | true |
| calico | root | true | true |
| flannel | root | true | true |
@@ -103,7 +102,6 @@ Typhoon publishes Terraform providers to the Terraform Registry, GPG signed by 0
| coredns | system-cluster-critical |
| kube-proxy | system-node-critical |
| cilium | system-node-critical |
| calico | system-node-critical |
| flannel | system-node-critical |
## Disclosures

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/), SELinux enforcing
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/fedora-coreos/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,12 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = [for fqdn in google_dns_record_set.etcds.*.name : trimsuffix(fqdn, ".")]
networking = var.networking
network_mtu = 1440
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
daemonset_tolerations = var.daemonset_tolerations

View File

@@ -58,7 +58,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -76,7 +75,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -116,7 +116,7 @@ variable "ssh_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}
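With the accepted values narrowed, a `validation` block is one way to fail fast on a stale `networking = "calico"` setting. This is a sketch of what a module could add, not necessarily what it ships:

```
variable "networking" {
  type        = string
  description = "Choice of networking provider (flannel or cilium)"
  default     = "cilium"

  validation {
    condition     = contains(["flannel", "cilium"], var.networking)
    error_message = "Networking must be \"flannel\" or \"cilium\"; Calico support was removed."
  }
}
```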

View File

@@ -30,7 +30,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/podman rm kubelet
@@ -48,7 +47,6 @@ systemd:
--volume /sys/fs/cgroup:/sys/fs/cgroup \
--volume /etc/selinux:/etc/selinux \
--volume /sys/fs/selinux:/sys/fs/selinux \
--volume /var/lib/calico:/var/lib/calico:ro \
--volume /var/lib/containerd:/var/lib/containerd \
--volume /var/lib/kubelet:/var/lib/kubelet:rshared,z \
--volume /var/log:/var/log \

View File

@@ -12,7 +12,7 @@ Typhoon distributes upstream Kubernetes, architectural conventions, and cluster
## Features <a href="https://www.cncf.io/certification/software-conformance/"><img align="right" src="https://storage.googleapis.com/poseidon/certified-kubernetes.png"></a>
* Kubernetes v1.32.0 (upstream)
* Single or multi-master, [Calico](https://www.projectcalico.org/) or [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* Single or multi-master, [Cilium](https://github.com/cilium/cilium) or [flannel](https://github.com/coreos/flannel) networking
* On-cluster etcd with TLS, [RBAC](https://kubernetes.io/docs/admin/authorization/rbac/)-enabled, [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
* Advanced features like [worker pools](https://typhoon.psdn.io/advanced/worker-pools/), [preemptible](https://typhoon.psdn.io/flatcar-linux/google-cloud/#preemption) workers, and [snippets](https://typhoon.psdn.io/advanced/customization/#hosts) customization
* Ready for Ingress, Prometheus, Grafana, CSI, and other optional [addons](https://typhoon.psdn.io/addons/overview/)

View File

@@ -1,12 +1,11 @@
# Kubernetes assets (kubeconfig, manifests)
module "bootstrap" {
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=c775b4de9a16ad1a94fef811f891c49169e7729f"
source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=33f8d2083cd2da5a18f954dd4f765b482d9b8046"
cluster_name = var.cluster_name
api_servers = [format("%s.%s", var.cluster_name, var.dns_zone)]
etcd_servers = [for fqdn in google_dns_record_set.etcds.*.name : trimsuffix(fqdn, ".")]
networking = var.networking
network_mtu = 1440
pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
daemonset_tolerations = var.daemonset_tolerations

View File

@@ -60,7 +60,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=/usr/bin/docker run -d \
@@ -75,7 +74,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \

View File

@@ -116,7 +116,7 @@ variable "ssh_authorized_key" {
variable "networking" {
type = string
description = "Choice of networking provider (flannel, calico, or cilium)"
description = "Choice of networking provider (flannel or cilium)"
default = "cilium"
}

View File

@@ -32,7 +32,6 @@ systemd:
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
# Podman, rkt, or runc run container processes, whereas docker run
@@ -50,7 +49,6 @@ systemd:
-v /lib/modules:/lib/modules:ro \
-v /run:/run \
-v /sys/fs/cgroup:/sys/fs/cgroup \
-v /var/lib/calico:/var/lib/calico:ro \
-v /var/lib/containerd:/var/lib/containerd \
-v /var/lib/kubelet:/var/lib/kubelet:rshared \
-v /var/log:/var/log \