diff --git a/packages/apps/kubernetes-proxmox/.helmignore b/packages/apps/kubernetes-proxmox/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/packages/apps/kubernetes-proxmox/Chart.yaml b/packages/apps/kubernetes-proxmox/Chart.yaml new file mode 100644 index 00000000..49dbcc63 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v2 +name: kubernetes +description: Managed Kubernetes service +icon: https://upload.wikimedia.org/wikipedia/commons/thumb/3/39/Kubernetes_logo_without_workmark.svg/723px-Kubernetes_logo_without_workmark.svg.png + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.2.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. 
Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.19.0" diff --git a/packages/apps/kubernetes-proxmox/Makefile b/packages/apps/kubernetes-proxmox/Makefile new file mode 100644 index 00000000..ecefd153 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/Makefile @@ -0,0 +1,17 @@ +UBUNTU_CONTAINER_DISK_TAG = v1.29.1 + +include ../../../scripts/common-envs.mk + +image: image-ubuntu-container-disk + +image-ubuntu-container-disk: + docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \ + --provenance false \ + --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \ + --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \ + --cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \ + --cache-to type=inline \ + --metadata-file images/ubuntu-container-disk.json \ + --push=$(PUSH) \ + --load=$(LOAD) + echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))" > images/ubuntu-container-disk.tag diff --git a/packages/apps/kubernetes-proxmox/README.md b/packages/apps/kubernetes-proxmox/README.md new file mode 100644 index 00000000..aaa8d398 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/README.md @@ -0,0 +1,28 @@ +# Managed Kubernetes Service + +## Overview + +The Managed Kubernetes Service offers a streamlined solution for efficiently managing server workloads. Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration. This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management. + +The Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method. 
Additionally, it ensures seamless scaling across a multitude of servers, addressing the challenges posed by complex and outdated APIs found in traditional virtualization platforms. This managed service eliminates the need for developing custom solutions or modifying source code, saving valuable time and effort. + +## Deployment Details + +The managed Kubernetes service deploys a standard Kubernetes cluster utilizing the Cluster API, Kamaji as control-plane provider and the Proxmox infrastructure provider. This ensures a consistent and reliable setup for workloads. + +Within this cluster, users can take advantage of LoadBalancer services and easily provision physical volumes as needed. The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application. + +- Docs: https://github.com/clastix/kamaji +- Docs: https://cluster-api.sigs.k8s.io/ +- GitHub: https://github.com/clastix/kamaji +- GitHub: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt +- GitHub: https://github.com/kubevirt/csi-driver + + +## How-Tos + +How to access the deployed cluster: + +``` +kubectl get secret -n <namespace> <cluster-name>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test +``` diff --git a/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk.json b/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk.json new file mode 100644 index 00000000..82131b31 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk.json @@ -0,0 +1,4 @@ +{ + "containerimage.config.digest": "sha256:62baab666445d76498fb14cc1d0865fc82e4bdd5cb1d7ba80475dc5024184622", + "containerimage.digest": "sha256:9363d717f966f4e7927da332eaaf17401b42203a2fcb493b428f94d096dae3a5" +} \ No newline at end of file diff --git a/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk.tag b/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk.tag
new file mode 100644 index 00000000..c2389748 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk.tag @@ -0,0 +1 @@ +ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.29.1 diff --git a/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk/Dockerfile b/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk/Dockerfile new file mode 100644 index 00000000..a30d70f7 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/images/ubuntu-container-disk/Dockerfile @@ -0,0 +1,51 @@ +FROM ubuntu:22.04 as guestfish + +ARG DEBIAN_FRONTEND=noninteractive +RUN apt-get update \ + && apt-get -y install \ + libguestfs-tools \ + linux-image-generic \ + make \ + bash-completion \ + && apt-get clean + +WORKDIR /build + +FROM guestfish as builder + +RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img + +RUN qemu-img resize image.img 5G \ + && eval "$(guestfish --listen --network)" \ + && guestfish --remote add-drive image.img \ + && guestfish --remote run \ + && guestfish --remote mount /dev/sda1 / \ + && guestfish --remote command "growpart /dev/sda 1 --verbose" \ + && guestfish --remote command "resize2fs /dev/sda1" \ +# docker repo + && guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \ + && guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \ +# kubernetes repo + && guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \ + && guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \ +# install containerd + && guestfish --remote command "apt-get 
update -y" \ + && guestfish --remote command "apt-get install -y containerd.io" \ +# configure containerd + && guestfish --remote command "mkdir -p /etc/containerd" \ + && guestfish --remote sh "containerd config default | tee /etc/containerd/config.toml" \ + && guestfish --remote command "sed -i '/SystemdCgroup/ s/=.*/= true/' /etc/containerd/config.toml" \ +# install kubernetes + && guestfish --remote command "apt-get install -y kubelet kubeadm" \ +# clean apt cache + && guestfish --remote sh 'apt-get clean && rm -rf /var/lib/apt/lists/*' \ +# write system configuration + && guestfish --remote sh 'printf "%s\n" net.bridge.bridge-nf-call-iptables=1 net.bridge.bridge-nf-call-ip6tables=1 net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1 net.ipv6.conf.all.disable_ipv6=0 net.ipv4.tcp_congestion_control=bbr vm.overcommit_memory=1 kernel.panic=10 kernel.panic_on_oops=1 fs.inotify.max_user_instances=8192 fs.inotify.max_user_watches=524288 | tee > /etc/sysctl.d/kubernetes.conf' \ + && guestfish --remote sh 'printf "%s\n" overlay br_netfilter | tee /etc/modules-load.d/kubernetes.conf' \ + && guestfish --remote sh "rm -f /etc/resolv.conf && ln -s ../run/systemd/resolve/stub-resolv.conf /etc/resolv.conf" \ +# umount all and exit + && guestfish --remote umount-all \ + && guestfish --remote exit + +FROM scratch +COPY --from=builder /build/image.img /disk/image.qcow2 diff --git a/packages/apps/kubernetes-proxmox/templates/NOTES.txt b/packages/apps/kubernetes-proxmox/templates/NOTES.txt new file mode 100644 index 00000000..23e07dab --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/NOTES.txt @@ -0,0 +1,3 @@ +To get kubeconfig for this cluster run: + +kubectl get secret -n {{ .Release.Namespace }} {{ .Release.Name }}-admin-kubeconfig -o go-template='{{`{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}`}}' diff --git a/packages/apps/kubernetes-proxmox/templates/_helpers.tpl b/packages/apps/kubernetes-proxmox/templates/_helpers.tpl new file mode 
100644 index 00000000..36c06b64 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "kubernetes.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kubernetes.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kubernetes.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kubernetes.labels" -}} +helm.sh/chart: {{ include "kubernetes.chart" . }} +{{ include "kubernetes.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kubernetes.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kubernetes.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/packages/apps/kubernetes-proxmox/templates/cloud-config.yaml b/packages/apps/kubernetes-proxmox/templates/cloud-config.yaml new file mode 100644 index 00000000..9f16548a --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/cloud-config.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-cloud-config +data: + cloud-config: | + loadBalancer: + creationPollInterval: 5 + creationPollTimeout: 60 + namespace: {{ .Release.Namespace }} diff --git a/packages/apps/kubernetes-proxmox/templates/cluster-autoscaler/deployment.yaml b/packages/apps/kubernetes-proxmox/templates/cluster-autoscaler/deployment.yaml new file mode 100644 index 00000000..ceccf990 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/cluster-autoscaler/deployment.yaml @@ -0,0 +1,86 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-cluster-autoscaler + labels: + app: {{ .Release.Name }}-cluster-autoscaler +spec: + selector: + matchLabels: + app: {{ .Release.Name }}-cluster-autoscaler + replicas: 1 + template: + metadata: + labels: + app: {{ .Release.Name }}-cluster-autoscaler + spec: + containers: + - image: ghcr.io/kvaps/test:cluster-autoscaller + name: cluster-autoscaler + command: + - /cluster-autoscaler + args: + - --cloud-provider=clusterapi + - --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc + - --clusterapi-cloud-config-authoritative + - --node-group-auto-discovery=clusterapi:namespace={{ .Release.Namespace }},clusterName={{ .Release.Name }} + volumeMounts: + - mountPath: /etc/kubernetes/kubeconfig + name: kubeconfig + readOnly: true + volumes: + - configMap: + name: {{ .Release.Name }}-cloud-config + name: cloud-config + - secret: + secretName: {{ .Release.Name }}-admin-kubeconfig + name: kubeconfig + serviceAccountName: {{ .Release.Name }}-cluster-autoscaler + terminationGracePeriodSeconds: 10 +--- +kind: RoleBinding 
+apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-cluster-autoscaler +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-cluster-autoscaler + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-cluster-autoscaler +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Release.Name }}-cluster-autoscaler +rules: + - apiGroups: + - cluster.x-k8s.io + resources: + - machinedeployments + - machinedeployments/scale + - machines + - machinesets + - machinepools + verbs: + - get + - list + - update + - watch + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - proxmoxmachinetemplates + verbs: + - get + - list + - update + - watch diff --git a/packages/apps/kubernetes-proxmox/templates/cluster.yaml b/packages/apps/kubernetes-proxmox/templates/cluster.yaml new file mode 100644 index 00000000..186ed08f --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/cluster.yaml @@ -0,0 +1,147 @@ +{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }} +{{- $etcd := index $myNS.metadata.annotations "namespace.cozystack.io/etcd" }} +{{- $ingress := index $myNS.metadata.annotations "namespace.cozystack.io/ingress" }} +{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.243.0.0/16 + controlPlaneRef: + namespace: {{ .Release.Namespace }} + apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 + kind: KamajiControlPlane + name: {{ .Release.Name }} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxCluster + name: {{ 
.Release.Name }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1alpha1 +kind: KamajiControlPlane +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + cluster.x-k8s.io/role: control-plane + annotations: + kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc" +spec: + dataStoreName: "{{ $etcd }}" + addons: + coreDNS: {} + konnectivity: {} + kubelet: + cgroupfs: systemd + preferredAddressTypes: + - InternalIP + - ExternalIP + network: + serviceType: ClusterIP + ingress: + extraAnnotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443 + className: "{{ $ingress }}" + deployment: + replicas: 2 + version: 1.29.0 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxCluster +metadata: + annotations: + cluster.x-k8s.io/managed-by: kamaji + cluster.x-k8s.io/cluster-name: {{ .Release.Name }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + spec: + controlPlaneEndpoint: + host: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }} + port: 443 + ipv4Config: + addresses: ${NODE_IP_RANGES} + prefix: ${IP_PREFIX} + gateway: ${GATEWAY} + dnsServers: ${DNS_SERVERS} + allowedNodes: ${ALLOWED_NODES:=[]} + +{{- range $groupName, $group := .Values.nodeGroups }} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: {{ $.Release.Name }}-{{ $groupName }} + namespace: {{ $.Release.Namespace }} +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: [${VM_SSH_KEYS}] + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxMachineTemplate +metadata: + name: {{ $.Release.Name }}-{{ $groupName }} + namespace: {{ $.Release.Namespace }} +spec: + template: + spec: + sourceNode: 
"${PROXMOX_SOURCENODE}" + templateID: ${TEMPLATE_VMID} + format: "qcow2" + full: true + numSockets: ${NUM_SOCKETS:=2} + numCores: ${NUM_CORES:=4} + memoryMiB: ${MEMORY_MIB:=16384} + disks: + bootVolume: + disk: ${BOOT_VOLUME_DEVICE} + sizeGb: ${BOOT_VOLUME_SIZE:=100} + network: + default: + bridge: ${BRIDGE} + model: virtio +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: {{ $.Release.Name }}-{{ $groupName }} + namespace: {{ $.Release.Namespace }} + annotations: + cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ $group.minReplicas }}" + cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ $group.maxReplicas }}" + capacity.cluster-autoscaler.kubernetes.io/memory: "{{ $group.resources.memory }}" + capacity.cluster-autoscaler.kubernetes.io/cpu: "{{ $group.resources.cpu }}" +spec: + clusterName: {{ $.Release.Name }} + template: + spec: + clusterName: {{ $.Release.Name }} + version: v1.29.0 + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: {{ $.Release.Name }}-{{ $groupName }} + namespace: default + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate + name: {{ $.Release.Name }}-{{ $groupName }} + namespace: default +{{- end }} diff --git a/packages/apps/kubernetes-proxmox/templates/clusterctl.yaml b/packages/apps/kubernetes-proxmox/templates/clusterctl.yaml new file mode 100644 index 00000000..5642f8cf --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/clusterctl.yaml @@ -0,0 +1,29 @@ +## -- Controller settings -- ## +PROXMOX_URL: "https://pve.example:8006" # The Proxmox VE host +PROXMOX_TOKEN: "root@pam!capi" # The Proxmox VE TokenID for authentication +PROXMOX_SECRET: "REDACTED" # The secret associated with the TokenID + + +## -- Required workload cluster default settings -- ## +PROXMOX_SOURCENODE: "pve" # The node that hosts the VM template to be used to provision VMs 
+TEMPLATE_VMID: "100" # The template VM ID used for cloning VMs +ALLOWED_NODES: "[pve1,pve2,pve3, ...]" # The Proxmox VE nodes used for VM deployments +VM_SSH_KEYS: "ssh-ed25519 ..., ssh-ed25519 ..." # The ssh authorized keys used to ssh to the machines. + +## -- networking configuration-- ## +CONTROL_PLANE_ENDPOINT_IP: "10.10.10.4" # The IP that kube-vip is going to use as a control plane endpoint +NODE_IP_RANGES: "[10.10.10.5-10.10.10.50, ...]" # The IP ranges for Cluster nodes +GATEWAY: "10.10.10.1" # The gateway for the machines network-config. +IP_PREFIX: "25" # Subnet Mask in CIDR notation for your node IP ranges +DNS_SERVERS: "[8.8.8.8,8.8.4.4]" # The dns nameservers for the machines network-config. +BRIDGE: "vmbr1" # The network bridge device for Proxmox VE VMs + +## -- xl nodes -- ## +BOOT_VOLUME_DEVICE: "scsi0" # The device used for the boot disk. +BOOT_VOLUME_SIZE: "100" # The size of the boot disk in GB. +NUM_SOCKETS: "2" # The number of sockets for the VMs. +NUM_CORES: "4" # The number of cores for the VMs. +MEMORY_MIB: "8048" # The memory size for the VMs. 
+ +EXP_CLUSTER_RESOURCE_SET: "true" # This enables the ClusterResourceSet feature that we are using to deploy CNI +CLUSTER_TOPOLOGY: "true" # This enables experimental ClusterClass templating \ No newline at end of file diff --git a/packages/apps/kubernetes-proxmox/templates/csi/deploy.yaml b/packages/apps/kubernetes-proxmox/templates/csi/deploy.yaml new file mode 100644 index 00000000..6d8783cf --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/csi/deploy.yaml @@ -0,0 +1,126 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: {{ .Release.Name }}-kcsi-controller + labels: + app: {{ .Release.Name }}-kcsi-driver +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }}-kcsi-driver + template: + metadata: + labels: + app: {{ .Release.Name }}-kcsi-driver + spec: + serviceAccountName: {{ .Release.Name }}-kcsi + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + effect: "NoSchedule" + containers: + - name: csi-driver + imagePullPolicy: Always + image: ghcr.io/kvaps/test:kubevirt-csi-driver + args: + - "--endpoint=$(CSI_ENDPOINT)" + - "--infra-cluster-namespace=$(INFRACLUSTER_NAMESPACE)" + - "--infra-cluster-labels=$(INFRACLUSTER_LABELS)" + - "--v=5" + ports: + - name: healthz + containerPort: 10301 + protocol: TCP + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: INFRACLUSTER_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: INFRACLUSTER_LABELS + value: "csi-driver/cluster=test" + - name: INFRA_STORAGE_CLASS_ENFORCEMENT + valueFrom: + configMapKeyRef: + name: driver-config + key: infraStorageClassEnforcement + optional: true + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - 
name: kubeconfig + mountPath: /etc/kubernetes/kubeconfig + readOnly: true + resources: + requests: + memory: 50Mi + cpu: 10m + - name: csi-provisioner + image: quay.io/openshift/origin-csi-external-provisioner:latest + args: + - "--csi-address=$(ADDRESS)" + - "--default-fstype=ext4" + - "--kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc" + - "--v=5" + - "--timeout=3m" + - "--retry-interval-max=1m" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: kubeconfig + mountPath: /etc/kubernetes/kubeconfig + readOnly: true + - name: csi-attacher + image: quay.io/openshift/origin-csi-external-attacher:latest + args: + - "--csi-address=$(ADDRESS)" + - "--kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc" + - "--v=5" + - "--timeout=3m" + - "--retry-interval-max=1m" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: kubeconfig + mountPath: /etc/kubernetes/kubeconfig + readOnly: true + resources: + requests: + memory: 50Mi + cpu: 10m + - name: csi-liveness-probe + image: quay.io/openshift/origin-csi-livenessprobe:latest + args: + - "--csi-address=/csi/csi.sock" + - "--probe-timeout=3s" + - "--health-port=10301" + volumeMounts: + - name: socket-dir + mountPath: /csi + resources: + requests: + memory: 50Mi + cpu: 10m + volumes: + - name: socket-dir + emptyDir: {} + - secret: + secretName: {{ .Release.Name }}-admin-kubeconfig + name: kubeconfig diff --git a/packages/apps/kubernetes-proxmox/templates/csi/infra-cluster-service-account.yaml b/packages/apps/kubernetes-proxmox/templates/csi/infra-cluster-service-account.yaml new file mode 100644 index 00000000..d70ea04a --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/csi/infra-cluster-service-account.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + 
name: {{ .Release.Name }}-kcsi +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-kcsi +rules: +- apiGroups: ["cdi.kubevirt.io"] + resources: ["datavolumes"] + verbs: ["get", "create", "delete"] +- apiGroups: ["kubevirt.io"] + resources: ["virtualmachineinstances"] + verbs: ["list", "get"] +- apiGroups: ["subresources.kubevirt.io"] + resources: ["virtualmachineinstances/addvolume", "virtualmachineinstances/removevolume"] + verbs: ["update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-kcsi +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-kcsi +subjects: +- kind: ServiceAccount + name: {{ .Release.Name }}-kcsi diff --git a/packages/apps/kubernetes-proxmox/templates/helmreleases/cilium.yaml b/packages/apps/kubernetes-proxmox/templates/helmreleases/cilium.yaml new file mode 100644 index 00000000..96c759fd --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/helmreleases/cilium.yaml @@ -0,0 +1,46 @@ +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: {{ .Release.Name }}-cilium + labels: + cozystack.io/repository: system + cozystack.io/target-cluster-name: {{ .Release.Name }} +spec: + interval: 1m + releaseName: cilium + chart: + spec: + chart: cozy-cilium + reconcileStrategy: Revision + sourceRef: + kind: HelmRepository + name: cozystack-system + namespace: cozy-system + kubeConfig: + secretRef: + name: {{ .Release.Name }}-kubeconfig + targetNamespace: cozy-cilium + storageNamespace: cozy-cilium + install: + createNamespace: true + values: + cilium: + tunnel: disabled + autoDirectNodeRoutes: true + cgroup: + autoMount: + enabled: true + hostRoot: /run/cilium/cgroupv2 + k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc + k8sServicePort: 6443 + + cni: + chainingMode: ~ + customConf: false + configMap: "" + routingMode: native + enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: "10.244.0.0/16" + dependsOn: + - name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} diff --git a/packages/apps/kubernetes-proxmox/templates/helmreleases/csi.yaml b/packages/apps/kubernetes-proxmox/templates/helmreleases/csi.yaml new file mode 100644 index 00000000..55169278 --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/helmreleases/csi.yaml @@ -0,0 +1,28 @@ +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: {{ .Release.Name }}-csi + labels: + cozystack.io/repository: system + cozystack.io/target-cluster-name: {{ .Release.Name }} +spec: + interval: 1m + releaseName: csi + chart: + spec: + chart: cozy-kubevirt-csi-node + reconcileStrategy: Revision + sourceRef: + kind: HelmRepository + name: cozystack-system + namespace: cozy-system + kubeConfig: + secretRef: + name: {{ .Release.Name }}-kubeconfig + targetNamespace: cozy-csi + storageNamespace: cozy-csi + install: + createNamespace: true + dependsOn: + - name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} diff --git a/packages/apps/kubernetes-proxmox/templates/helmreleases/delete.yaml b/packages/apps/kubernetes-proxmox/templates/helmreleases/delete.yaml new file mode 100644 index 00000000..e143653a --- /dev/null +++ b/packages/apps/kubernetes-proxmox/templates/helmreleases/delete.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "10" + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed + name: {{ .Release.Name }}-flux-teardown +spec: + template: + spec: + serviceAccountName: {{ .Release.Name }}-flux-teardown + restartPolicy: Never + containers: + - name: kubectl + image: docker.io/clastix/kubectl:v1.29.1 + command: + - kubectl + - --namespace={{ .Release.Namespace }} + - patch + - helmrelease + - {{ .Release.Name }}-cilium + - {{ .Release.Name }}-csi + - -p + - '{"spec": {"suspend": true}}' + - --type=merge
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Release.Name }}-flux-teardown
+  annotations:
+    helm.sh/hook: pre-delete
+    helm.sh/hook-delete-policy: before-hook-creation,hook-failed
+    helm.sh/hook-weight: "0"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  annotations:
+    "helm.sh/hook": pre-install,post-install,pre-delete
+    "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
+    "helm.sh/hook-weight": "5"
+  name: {{ .Release.Name }}-flux-teardown
+rules:
+  - apiGroups:
+      - "helm.toolkit.fluxcd.io"
+    resources:
+      - helmreleases
+    verbs:
+      - get
+      - patch
+    resourceNames:
+      - {{ .Release.Name }}-cilium
+      - {{ .Release.Name }}-csi
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  annotations:
+    helm.sh/hook: pre-delete
+    helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation,hook-failed
+    helm.sh/hook-weight: "5"
+  name: {{ .Release.Name }}-flux-teardown
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ .Release.Name }}-flux-teardown
+subjects:
+  - kind: ServiceAccount
+    name: {{ .Release.Name }}-flux-teardown
+    namespace: {{ .Release.Namespace }}
diff --git a/packages/apps/kubernetes-proxmox/templates/pccm/manager.yaml b/packages/apps/kubernetes-proxmox/templates/pccm/manager.yaml
new file mode 100644
index 00000000..4e0984b5
--- /dev/null
+++ b/packages/apps/kubernetes-proxmox/templates/pccm/manager.yaml
@@ -0,0 +1,111 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ .Release.Name }}-kccm
+  labels:
+    helm.sh/chart: proxmox-cloud-controller-manager-0.2.0
+    app.kubernetes.io/name: {{ .Release.Name }}-kccm
+    app.kubernetes.io/instance: {{ .Release.Name }}-kccm
+    app.kubernetes.io/version: "v0.4.0"
+    app.kubernetes.io/managed-by: Helm
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: 1
+  strategy:
+    type: RollingUpdate
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: {{ .Release.Name }}-kccm
+      app.kubernetes.io/instance: {{ .Release.Name }}-kccm
+  # Restored pod template (was missing entirely, which makes the Deployment
+  # invalid); its labels must match .spec.selector.matchLabels above.
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: {{ .Release.Name }}-kccm
+        app.kubernetes.io/instance: {{ .Release.Name }}-kccm
+    spec:
+      enableServiceLinks: false
+      priorityClassName: system-cluster-critical
+      serviceAccountName: {{ .Release.Name }}-pccm
+      securityContext:
+        fsGroup: 10258
+        fsGroupChangePolicy: OnRootMismatch
+        runAsGroup: 10258
+        runAsNonRoot: true
+        runAsUser: 10258
+      hostAliases:
+        []
+      initContainers:
+        []
+      containers:
+        - name: proxmox-cloud-controller-manager
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - ALL
+            seccompProfile:
+              type: RuntimeDefault
+          # Pinned to the chart's declared app version (was an untagged image).
+          image: ghcr.io/sergelogvinov/proxmox-cloud-controller-manager:v0.4.0
+          imagePullPolicy: IfNotPresent
+          args:
+            - --v=4
+            - --cloud-provider=proxmox
+            # Path fixed to match the cloud-config volume mounted at /etc/proxmox.
+            - --cloud-config=/etc/proxmox/config.yaml
+            - --controllers=cloud-node,cloud-node-lifecycle
+            - --leader-elect-resource-name=cloud-controller-manager-proxmox
+            - --use-service-account-credentials
+            - --secure-port=10258
+            - --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 10258
+              scheme: HTTPS
+            initialDelaySeconds: 20
+            periodSeconds: 30
+            timeoutSeconds: 5
+          resources:
+            requests:
+              cpu: 10m
+              memory: 32Mi
+          volumeMounts:
+            - mountPath: /etc/kubernetes/kubeconfig
+              name: kubeconfig
+              readOnly: true
+            - mountPath: /etc/proxmox
+              name: cloud-config
+              readOnly: true
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: node-role.kubernetes.io/control-plane
+                    operator: Exists
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/control-plane
+          operator: Exists
+        - effect: NoSchedule
+          key: node.cloudprovider.kubernetes.io/uninitialized
+          operator: Exists
+      topologySpreadConstraints:
+        - maxSkew: 1
+          topologyKey: kubernetes.io/hostname
+          whenUnsatisfiable: DoNotSchedule
+          labelSelector:
+            matchLabels:
+              app.kubernetes.io/name: {{ .Release.Name }}-kccm
+              app.kubernetes.io/instance: {{ .Release.Name }}-kccm
+      volumes:
+        - name: cloud-config
+          secret:
+            # Matches the Secret rendered by pccm_secrets.yaml.
+            secretName: {{ .Release.Name }}-cloud-config
+            defaultMode: 416
+        - secret:
+            secretName: {{ .Release.Name }}-admin-kubeconfig
+          name: kubeconfig
diff --git a/packages/apps/kubernetes-proxmox/templates/pccm/pccm_role.yaml b/packages/apps/kubernetes-proxmox/templates/pccm/pccm_role.yaml
new file mode 100644
index 00000000..1124ae06
--- /dev/null
+++ b/packages/apps/kubernetes-proxmox/templates/pccm/pccm_role.yaml
@@ -0,0 +1,57 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
+  labels:
+    helm.sh/chart: proxmox-cloud-controller-manager-0.2.0
+    app.kubernetes.io/name: {{ .Release.Name }}-kccm
+    app.kubernetes.io/instance: {{ .Release.Name }}-kccm
+    app.kubernetes.io/version: "v0.4.0"
+    app.kubernetes.io/managed-by: Helm
+rules:
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - get
+  - create
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - nodes/status
+  verbs:
+  - patch
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts/token
+  verbs:
+  - create
diff --git a/packages/apps/kubernetes-proxmox/templates/pccm/pccm_role_binding.yaml b/packages/apps/kubernetes-proxmox/templates/pccm/pccm_role_binding.yaml
new file mode 100644
index 00000000..a16e0129
--- /dev/null
+++ b/packages/apps/kubernetes-proxmox/templates/pccm/pccm_role_binding.yaml
@@ -0,0 +1,27 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
+subjects:
+  # Subject must match the ServiceAccount created in service_account.yaml.
+  - kind: ServiceAccount
+    name: {{ .Release.Name }}-pccm
+    namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm:extension-apiserver-authentication-reader
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: extension-apiserver-authentication-reader
+subjects:
+  - kind: ServiceAccount
+    name: {{ .Release.Name }}-pccm
+    namespace: {{ .Release.Namespace }}
diff --git a/packages/apps/kubernetes-proxmox/templates/pccm/pccm_secrets.yaml b/packages/apps/kubernetes-proxmox/templates/pccm/pccm_secrets.yaml
new file mode 100644
index 00000000..4014f9d1
--- /dev/null
+++ b/packages/apps/kubernetes-proxmox/templates/pccm/pccm_secrets.yaml
@@ -0,0 +1,17 @@
+{{- if ne (len .Values.config.clusters) 0 }}
+apiVersion: v1
+kind: Secret
+metadata:
+  # Name matches the secretName referenced by the Deployment's cloud-config volume.
+  name: {{ .Release.Name }}-cloud-config
+  labels:
+    # Explicit labels: the "proxmox-cloud-controller-manager.labels" helper is not defined in this chart.
+    helm.sh/chart: proxmox-cloud-controller-manager-0.2.0
+    app.kubernetes.io/name: {{ .Release.Name }}-pccm
+    app.kubernetes.io/instance: {{ .Release.Name }}-pccm
+    app.kubernetes.io/version: "v0.4.0"
+    app.kubernetes.io/managed-by: Helm
+  namespace: {{ .Release.Namespace }}
+data:
+  config.yaml: {{ toYaml .Values.config | b64enc | quote }}
+{{- end }}
diff --git a/packages/apps/kubernetes-proxmox/templates/pccm/service_account.yaml b/packages/apps/kubernetes-proxmox/templates/pccm/service_account.yaml
new file mode 100644
index 00000000..fc25bc04
--- /dev/null
+++ b/packages/apps/kubernetes-proxmox/templates/pccm/service_account.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Release.Name }}-pccm
+  labels:
+    helm.sh/chart: proxmox-cloud-controller-manager-0.2.0
+    app.kubernetes.io/name: {{ .Release.Name }}-pccm
+    app.kubernetes.io/instance: {{ .Release.Name }}-pccm
+    app.kubernetes.io/version: "v0.4.0"
+    app.kubernetes.io/managed-by: Helm
diff --git a/packages/apps/kubernetes-proxmox/values.yaml b/packages/apps/kubernetes-proxmox/values.yaml
new file mode 100644
index 00000000..2a8fd46b
--- /dev/null
+++ b/packages/apps/kubernetes-proxmox/values.yaml
@@ -0,0 +1,14 @@
+host: ""
+controlPlane:
+  replicas: 2
+nodeGroups:
+  md0:
+    minReplicas: 0
+    maxReplicas: 10
+    resources:
+      cpu: 2
+      memory: 1024Mi
+# Proxmox connection settings consumed by pccm_secrets.yaml; the Secret is
+# only rendered when at least one cluster is configured here.
+config:
+  clusters: []