feat(talos): Bootstrap Cilium from ConfigMap
Start adding infrastructure manifests
5 .gitignore vendored
@@ -1,6 +1,6 @@
 .idea
 charts/example
-**/infra/*/charts
+**/infra/**/charts
 *secret*.yaml

 .terraform
@@ -21,3 +21,6 @@ override.tf.json

 *.qcow2
 *.raw
+
+*.cert
+*.key
README.md
@@ -64,16 +64,16 @@ over at GitLab.

 - [ ] Clean up DNS config
 - [ ] Renovate for automatic updates
-- [ ] Build a NAS for storage
+- [x] Build a NAS for storage
 - [ ] Template Gauss
 - [ ] Replace Pi Hole with AdGuard Home
-- [ ] Use iGPU on Euclid for video transcoding
-- [ ] Replace Traefik with Cilium Ingress Controller
+- [x] Use iGPU on Euclid for video transcoding
+- [x] Replace Traefik with Cilium Ingress Controller
 - [ ] Cilium mTLS & SPIFFE/SPIRE

 ## 👷 Future Projects

-- [ ] Use Talos instead of Debian for Kubernetes
+- [x] Use Talos instead of Debian for Kubernetes
 - [ ] Keycloak for auth
 - [ ] Dynamic Resource Allocation for GPU
 - [ ] Local LLM
37 remodel/k8s/README.md Normal file
@@ -0,0 +1,37 @@
# Manual bootstrap

## Cilium

```shell
kubectl kustomize --enable-helm infra/network/cilium | kubectl apply -f -
```
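Cilium lands in `kube-system` and the agents can take a minute to come up. A quick check, assuming the chart's default `k8s-app=cilium` label:

```shell
kubectl -n kube-system get pods -l k8s-app=cilium --watch
```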
## Sealed-secrets

```shell
kubectl kustomize --enable-helm infra/controllers/sealed-secrets | kubectl apply -f -
```
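With the controller running, secrets are encrypted client-side with `kubeseal` before they are committed. A minimal sketch, assuming the release name and namespace above and a hypothetical plain `secret.yaml` as input:

```shell
kubeseal --controller-name sealed-secrets-controller \
  --controller-namespace sealed-secrets \
  --format yaml < secret.yaml > sealed-secret.yaml
```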
## Proxmox CSI Plugin

```shell
kubectl kustomize --enable-helm infra/storage/proxmox-csi | kubectl apply -f -
```

```shell
kubectl get csistoragecapacities -o custom-columns=CLASS:.storageClassName,AVAIL:.capacity,ZONE:.nodeTopology.matchLabels -A
```
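The capacities come from the `proxmox-csi` StorageClass added later in this commit. A throwaway claim makes an easy smoke test (hypothetical name and size); with `volumeBindingMode: WaitForFirstConsumer` it stays `Pending` until a pod mounts it:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-test
spec:
  accessModes: [ ReadWriteOnce ]
  storageClassName: proxmox-csi
  resources:
    requests:
      storage: 1Gi
EOF
```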
## Argo CD

```shell
kubectl kustomize --enable-helm infra/controllers/argocd | kubectl apply -f -
```

```shell
kubectl -n argocd get secret argocd-initial-admin-secret -ojson | jq -r '.data.password | @base64d'
```
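Since the values set `server.insecure: true` and the HTTPRoute is still commented out, a port-forward is the simplest way to reach the UI for now:

```shell
kubectl -n argocd port-forward svc/argocd-server 8080:80
```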
```shell
kubectl kustomize --enable-helm infra/storage | kubectl apply -f -
```
32 remodel/k8s/infra/application-set.yaml Normal file
@@ -0,0 +1,32 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: infrastructure
  namespace: argocd
  labels:
    dev.stonegarden: infrastructure
spec:
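  # One Application is generated per directory that matches the path below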
  generators:
    - git:
        repoURL: https://github.com/vehagn/homelab
        revision: HEAD
        directories:
          - path: remodel/k8s/infra/*
  template:
    metadata:
      name: '{{ path.basename }}'
      labels:
        dev.stonegarden: infrastructure
    spec:
      project: infrastructure
      source:
        repoURL: https://github.com/vehagn/homelab
        targetRevision: HEAD
        path: '{{ path }}'
      destination:
        name: in-cluster
        namespace: argocd
      syncPolicy:
        automated:
          selfHeal: true
          prune: true
19 remodel/k8s/infra/argocd/http-route.yaml Normal file
@@ -0,0 +1,19 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: http-route-euclid
  namespace: argocd
spec:
  parentRefs:
    - name: euclid
      namespace: gateway
  hostnames:
    - "argocd.euclid.stonegarden.dev"
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /
      backendRefs:
        - name: argocd-server
          port: 80
14 remodel/k8s/infra/argocd/kustomization.yaml Normal file
@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - ns.yaml
#  - http-route.yaml

helmCharts:
  - name: argo-cd
    repo: https://argoproj.github.io/argo-helm
    version: 7.3.3
    releaseName: "argocd"
    namespace: argocd
    valuesFile: values.yaml
4 remodel/k8s/infra/argocd/ns.yaml Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: argocd
49 remodel/k8s/infra/argocd/values.yaml Normal file
@@ -0,0 +1,49 @@
configs:
  cm:
    create: true
    application.resourceTrackingMethod: "annotation+label"
  cmp:
    create: true
    plugins:
      kustomize-build-with-helm:
        generate:
          command: [ "sh", "-c" ]
          args: [ "kustomize build --enable-helm" ]
  params:
    server.insecure: true

crds:
  install: true
  # -- Keep CRDs on chart uninstall
  keep: false
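# The sidecar below serves the kustomize-build-with-helm plugin declared under
# configs.cmp, so Applications can render Helm charts through Kustomize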
repoServer:
|
||||
extraContainers:
|
||||
- name: kustomize-build-with-helm
|
||||
command:
|
||||
- argocd-cmp-server
|
||||
image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 999
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
capabilities:
|
||||
drop: [ "ALL" ]
|
||||
volumeMounts:
|
||||
- name: plugins
|
||||
mountPath: /home/argocd/cmp-server/plugins
|
||||
- name: cmp-kustomize-build-with-helm
|
||||
mountPath: /home/argocd/cmp-server/config/plugin.yaml
|
||||
subPath: kustomize-build-with-helm.yaml
|
||||
- mountPath: /tmp
|
||||
name: cmp-tmp
|
||||
volumes:
|
||||
- name: cmp-kustomize-build-with-helm
|
||||
configMap:
|
||||
name: argocd-cmp-cm
|
||||
- name: cmp-tmp
|
||||
emptyDir: { }
|
||||
|
||||
34 remodel/k8s/infra/controllers/application-set.yaml Normal file
@@ -0,0 +1,34 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: controllers
  namespace: argocd
  labels:
    dev.stonegarden: controllers
spec:
  generators:
    - git:
        repoURL: https://github.com/vehagn/homelab
        revision: remodel
        directories:
          - path: remodel/k8s/infra/controllers/*
  template:
    metadata:
      name: '{{ path.basename }}'
      labels:
        dev.stonegarden: controllers
    spec:
      project: controllers
      source:
        plugin:
          name: kustomize-build-with-helm
        repoURL: https://github.com/vehagn/homelab
        targetRevision: remodel
        path: '{{ path }}'
      destination:
        name: in-cluster
        namespace: argocd
      syncPolicy:
        automated:
          selfHeal: true
          prune: true
9 remodel/k8s/infra/controllers/kustomization.yaml Normal file
@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
commonLabels:
  dev.stonegarden: controllers
  app.kubernetes.io/managed-by: argocd

resources:
  - project.yaml
  - application-set.yaml
24 remodel/k8s/infra/controllers/project.yaml Normal file
@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: controllers
  namespace: argocd
spec:
  sourceRepos:
    - 'https://github.com/vehagn/homelab'
  destinations:
    - namespace: 'argocd'
      server: '*'
    - namespace: 'cert-manager'
      server: '*'
    - namespace: 'node-feature-discovery'
      server: '*'
    - namespace: 'intel-device-plugins'
      server: '*'
    - namespace: 'kube-system'
      server: '*'
    - namespace: 'sealed-secrets'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'
13 remodel/k8s/infra/controllers/sealed-secrets/kustomization.yaml Normal file
@@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

helmCharts:
  - name: sealed-secrets
    # repo: https://bitnami-labs.github.io/sealed-secrets
    # version: 2.16.0
    repo: oci://registry-1.docker.io/bitnamicharts/sealed-secrets
    version: 2.3.6
    releaseName: sealed-secrets-controller
    namespace: sealed-secrets
    includeCRDs: true
    valuesFile: values.yaml
10 remodel/k8s/infra/controllers/sealed-secrets/values.yaml Normal file
@@ -0,0 +1,10 @@
namespace: sealed-secrets
keyrenewperiod: 8766h # ~ 1 year

resources:
  limits:
    cpu: 100m
    memory: 64Mi
  requests:
    cpu: 10m
    memory: 32Mi
9 remodel/k8s/infra/kustomization.yaml Normal file
@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
commonLabels:
  dev.stonegarden: infrastructure
  app.kubernetes.io/managed-by: argocd

resources:
  - project.yaml
  - application-set.yaml
34 remodel/k8s/infra/network/application-set.yaml Normal file
@@ -0,0 +1,34 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: network
  namespace: argocd
  labels:
    dev.stonegarden: network
spec:
  generators:
    - git:
        repoURL: https://github.com/vehagn/homelab
        revision: HEAD
        directories:
          - path: remodel/k8s/infra/network/*
  template:
    metadata:
      name: '{{ path.basename }}'
      labels:
        dev.stonegarden: network
    spec:
      project: network
      source:
        plugin:
          name: kustomize-build-with-helm
        repoURL: https://github.com/vehagn/homelab
        targetRevision: HEAD
        path: '{{ path }}'
      destination:
        name: in-cluster
        namespace: argocd
      syncPolicy:
        automated:
          selfHeal: true
          prune: true
8 remodel/k8s/infra/network/cilium/announce.yaml Normal file
@@ -0,0 +1,8 @@
apiVersion: cilium.io/v2alpha1
kind: CiliumL2AnnouncementPolicy
metadata:
  name: default-l2-announcement-policy
  namespace: kube-system
spec:
  externalIPs: true
  loadBalancerIPs: true
8 remodel/k8s/infra/network/cilium/ip-pool.yaml Normal file
@@ -0,0 +1,8 @@
apiVersion: cilium.io/v2alpha1
kind: CiliumLoadBalancerIPPool
metadata:
  name: first-pool
spec:
  blocks:
    - start: 192.168.1.220
      stop: 192.168.1.229
15 remodel/k8s/infra/network/cilium/kustomization.yaml Normal file
@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - announce.yaml
  - ip-pool.yaml

helmCharts:
  - name: cilium
    repo: https://helm.cilium.io
    version: 1.15.5
    releaseName: "cilium"
    includeCRDs: true
    namespace: kube-system
    valuesFile: values.yaml
81 remodel/k8s/infra/network/cilium/values.yaml Normal file
@@ -0,0 +1,81 @@
cluster:
  name: talos
  id: 1

kubeProxyReplacement: true
# Talos specific
k8sServiceHost: localhost
k8sServicePort: 7445
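# (7445 is the KubePrism endpoint Talos exposes on localhost of every node)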
securityContext:
  capabilities:
    ciliumAgent: [ CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID ]
    cleanCiliumState: [ NET_ADMIN,SYS_ADMIN,SYS_RESOURCE ]

cgroup:
  autoMount:
    enabled: false
  hostRoot: /sys/fs/cgroup

# https://docs.cilium.io/en/stable/network/concepts/ipam/
ipam:
  mode: kubernetes

operator:
  rollOutPods: true
  resources:
    limits:
      cpu: 500m
      memory: 256Mi
    requests:
      cpu: 50m
      memory: 128Mi

# Roll out cilium agent pods automatically when ConfigMap is updated.
rollOutCiliumPods: true
resources:
  limits:
    cpu: 1000m
    memory: 1Gi
  requests:
    cpu: 200m
    memory: 512Mi

#debug:
#  enabled: true

# Increase rate limit when doing L2 announcements
k8sClientRateLimit:
  qps: 20
  burst: 100

l2announcements:
  enabled: true

externalIPs:
  enabled: true

enableCiliumEndpointSlice: true

gatewayAPI:
  enabled: true

hubble:
  enabled: false
  relay:
    enabled: false
    rollOutPods: true
  ui:
    enabled: false
    rollOutPods: true

ingressController:
  enabled: true
  default: true
  loadbalancerMode: shared
  service:
    annotations:
      io.cilium/lb-ipam-ips: 192.168.1.223
    # Random values so Argo CD doesn't complain about the service being out of sync
    insecureNodePort: 32434
    secureNodePort: 31247
9 remodel/k8s/infra/network/kustomization.yaml Normal file
@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
commonLabels:
  dev.stonegarden: network
  app.kubernetes.io/managed-by: argocd

resources:
  - project.yaml
  - application-set.yaml
24 remodel/k8s/infra/network/project.yaml Normal file
@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: network
  namespace: argocd
spec:
  sourceRepos:
    - 'https://gitlab.com/vehagn/mini-homelab.git'
    - 'https://prometheus-community.github.io/helm-charts'
    - 'https://argoproj.github.io/argo-helm'
  destinations:
    - namespace: 'adguard'
      server: '*'
    - namespace: 'argocd'
      server: '*'
    - namespace: 'cloudflared'
      server: '*'
    - namespace: 'gateway'
      server: '*'
    - namespace: 'kube-system'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'
14 remodel/k8s/infra/project.yaml Normal file
@@ -0,0 +1,14 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: infrastructure
  namespace: argocd
spec:
  sourceRepos:
    - 'https://github.com/vehagn/homelab'
  destinations:
    - namespace: 'argocd'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'
36 remodel/k8s/infra/storage/application-set.yaml Normal file
@@ -0,0 +1,36 @@
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: storage
  namespace: argocd
  labels:
    dev.stonegarden: storage
spec:
  generators:
    - git:
        repoURL: https://github.com/vehagn/homelab
        #revision: HEAD
        revision: remodel
        directories:
          - path: remodel/k8s/infra/storage/*
  template:
    metadata:
      name: '{{ path.basename }}'
      labels:
        dev.stonegarden: storage
    spec:
      project: storage
      source:
        plugin:
          name: kustomize-build-with-helm
        repoURL: https://github.com/vehagn/homelab
        #targetRevision: HEAD
        targetRevision: remodel
        path: '{{ path }}'
      destination:
        name: in-cluster
        namespace: argocd
      syncPolicy:
        automated:
          selfHeal: true
          prune: true
9 remodel/k8s/infra/storage/kustomization.yaml Normal file
@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
commonLabels:
  dev.stonegarden: storage
  app.kubernetes.io/managed-by: argocd

resources:
  - project.yaml
  - application-set.yaml
16 remodel/k8s/infra/storage/project.yaml Normal file
@@ -0,0 +1,16 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: storage
  namespace: argocd
spec:
  sourceRepos:
    - 'https://github.com/vehagn/homelab'
  destinations:
    - namespace: 'argocd'
      server: '*'
    - namespace: 'csi-proxmox'
      server: '*'
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'
20 remodel/k8s/infra/storage/proxmox-csi/kustomization.yaml Normal file
@@ -0,0 +1,20 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  # - https://raw.githubusercontent.com/sergelogvinov/proxmox-csi-plugin/v0.7.0/docs/deploy/proxmox-csi-plugin-release.yml
  - storage-class.yaml

#images:
#  - name: ghcr.io/sergelogvinov/proxmox-csi-node
#    newTag: edge
#  - name: ghcr.io/sergelogvinov/proxmox-csi-controller
#    newTag: edge

helmCharts:
  - name: proxmox-csi-plugin
    repo: oci://ghcr.io/sergelogvinov/charts/proxmox-csi-plugin
    version: 0.7.0
    releaseName: proxmox-csi-plugin
    includeCRDs: true
    namespace: csi-proxmox
15 remodel/k8s/infra/storage/proxmox-csi/storage-class.yaml Normal file
@@ -0,0 +1,15 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: proxmox-csi
allowVolumeExpansion: true
parameters:
  csi.storage.k8s.io/fstype: ext4
  storage: local-zfs
  cache: writethrough
  ssd: "true"
mountOptions:
  - noatime
provisioner: csi.proxmox.sinextra.dev
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
@@ -1,16 +1,17 @@
 resource "null_resource" "haos_image" {
   triggers = {
     on_version_change = var.haos_version
+    filename          = var.local_file
   }

   provisioner "local-exec" {
     command = "curl -s -L ${var.haos_download_url}/${var.haos_version}/haos_ova-${var.haos_version}.qcow2.xz | xz -d > ${var.local_file}"
   }

-  # provisioner "local-exec" {
-  #   when    = destroy
-  #   command = "rm ${local.haos.local_file}"
-  # }
+  provisioner "local-exec" {
+    when    = destroy
+    command = "rm ${self.triggers.filename}"
+  }
 }

 resource "proxmox_virtual_environment_file" "haos_generic_image" {
remodel/tofu/kubernetes/bootstrap/cilium/install.yaml
@@ -68,16 +68,19 @@ spec:
                fieldPath: status.podIP
          - name: KUBERNETES_SERVICE_PORT
            value: "6443"
        volumeMounts:
          - name: values
            mountPath: /root/app/values.yaml
            subPath: values.yaml
        command:
          - cilium
          - install
          - --version=v1.15.5 # renovate: github-releases=cilium/cilium
          - --helm-set=ipam.mode=kubernetes
          - --set
          - kubeProxyReplacement=true
          - --helm-set=securityContext.capabilities.ciliumAgent={CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}
          - --helm-set=securityContext.capabilities.cleanCiliumState={NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}
          - --helm-set=cgroup.autoMount.enabled=false
          - --helm-set=cgroup.hostRoot=/sys/fs/cgroup
          - --helm-set=k8sServiceHost=localhost
          - --helm-set=k8sServicePort=7445
          - --values
          - values.yaml
      volumes:
        - name: values
          configMap:
            name: cilium-values
56 remodel/tofu/kubernetes/bootstrap/proxmox-csi-plugin/config.tf Normal file
@@ -0,0 +1,56 @@
resource "proxmox_virtual_environment_role" "csi" {
  role_id = "CSI"
  privileges = [
    "VM.Audit",
    "VM.Config.Disk",
    "Datastore.Allocate",
    "Datastore.AllocateSpace",
    "Datastore.Audit"
  ]
}

resource "proxmox_virtual_environment_user" "kubernetes-csi" {
  user_id = "kubernetes-csi@pve"
  comment = "User for Proxmox CSI Plugin"
  acl {
    path      = "/"
    propagate = true
    role_id   = proxmox_virtual_environment_role.csi.role_id
  }
}

resource "proxmox_virtual_environment_user_token" "kubernetes-csi-token" {
  comment               = "Token for Proxmox CSI Plugin"
  token_name            = "csi"
  user_id               = proxmox_virtual_environment_user.kubernetes-csi.user_id
  privileges_separation = false
}

resource "kubernetes_namespace" "csi-proxmox" {
  metadata {
    name = "csi-proxmox"
    labels = {
      "pod-security.kubernetes.io/enforce" = "privileged"
      "pod-security.kubernetes.io/audit"   = "baseline"
      "pod-security.kubernetes.io/warn"    = "baseline"
    }
  }
}

resource "kubernetes_secret" "proxmox-csi-plugin" {
  metadata {
    name      = "proxmox-csi-plugin"
    namespace = kubernetes_namespace.csi-proxmox.id
  }

  data = {
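    # The provider returns the token as "<id>=<secret>"; the split/element
    # expression below keeps only the secret part for the plugin config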
"config.yaml" = <<EOF
|
||||
clusters:
|
||||
- url: "${var.proxmox.endpoint}/api2/json"
|
||||
insecure: ${var.proxmox.insecure}
|
||||
token_id: "${proxmox_virtual_environment_user_token.kubernetes-csi-token.id}"
|
||||
token_secret: "${element(split("=", proxmox_virtual_environment_user_token.kubernetes-csi-token.value), length(split("=", proxmox_virtual_environment_user_token.kubernetes-csi-token.value)) - 1)}"
|
||||
region: ${var.proxmox.cluster_name}
|
||||
EOF
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,12 @@
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">=2.31.0"
    }
    proxmox = {
      source  = "bpg/proxmox"
      version = ">=0.60.0"
    }
  }
}
@@ -0,0 +1,7 @@
variable "proxmox" {
  type = object({
    cluster_name = string
    endpoint     = string
    insecure     = bool
  })
}
23 remodel/tofu/kubernetes/bootstrap/sealed-secrets/config.tf Normal file
@@ -0,0 +1,23 @@
resource "kubernetes_namespace" "sealed-secrets" {
  metadata {
    name = "sealed-secrets"
  }
}

resource "kubernetes_secret" "sealed-secrets-key" {
  depends_on = [kubernetes_namespace.sealed-secrets]
  type       = "kubernetes.io/tls"

  metadata {
    name      = "sealed-secrets-bootstrap-key"
    namespace = "sealed-secrets"
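    # The controller adopts any secret carrying this label as a sealing key,
    # so secrets encrypted against the pre-generated cert stay decryptable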
    labels = {
      "sealedsecrets.bitnami.com/sealed-secrets-key" = "active"
    }
  }

  data = {
    "tls.crt" = var.sealed_secrets_cert.cert
    "tls.key" = var.sealed_secrets_cert.key
  }
}
@@ -0,0 +1,8 @@
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">=2.31.0"
    }
  }
}
@@ -0,0 +1,6 @@
variable "sealed_secrets_cert" {
  type = object({
    cert = string
    key  = string
  })
}
58 remodel/tofu/kubernetes/main.tf Normal file
@@ -0,0 +1,58 @@
module "talos" {
  source = "./talos"

  providers = {
    proxmox = proxmox
  }

  talos_image    = var.talos_image
  cluster_config = var.cluster_config
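  # Feed the Argo CD-managed Cilium values into the Talos bootstrap so the
  # inline-manifest install matches what GitOps reconciles later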
  cilium = {
    values  = file("${path.module}/../../k8s/infra/network/cilium/values.yaml")
    install = file("${path.module}/bootstrap/cilium/install.yaml")
  }
}

module "proxmox_csi_plugin" {
  source = "./bootstrap/proxmox-csi-plugin"

  providers = {
    proxmox    = proxmox
    kubernetes = kubernetes
  }

  proxmox = var.proxmox
}

module "sealed_secrets" {
  source = "./bootstrap/sealed-secrets"

  providers = {
    kubernetes = kubernetes
  }

  // openssl req -x509 -days 365 -nodes -newkey rsa:4096 -keyout tls.key -out tls.cert -subj "/CN=sealed-secret/O=sealed-secret"
  sealed_secrets_cert = {
    cert = file("${path.module}/tls.cert")
    key  = file("${path.module}/tls.key")
  }
}

resource "local_file" "machine_configs" {
  for_each        = module.talos.talos_machine_config
  content         = each.value.machine_configuration
  filename        = "output/talos-machine-config-${each.key}.yaml"
  file_permission = "0600"
}

resource "local_file" "talos_config" {
  content         = module.talos.talos_client_configuration.talos_config
  filename        = "output/talos-config.yaml"
  file_permission = "0600"
}

resource "local_file" "kube_config" {
  content         = module.talos.talos_kube_config.kubeconfig_raw
  filename        = "output/kube-config.yaml"
  file_permission = "0600"
}
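A minimal sketch of running the whole bootstrap, assuming OpenTofu and populated variables; the kubeconfig and Talos client config land under `output/` as declared above:

```shell
tofu init
tofu apply
export KUBECONFIG=output/kube-config.yaml
export TALOSCONFIG=output/talos-config.yaml
kubectl get nodes
```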
38 remodel/tofu/kubernetes/providers.tf Normal file
@@ -0,0 +1,38 @@
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.31.0"
    }
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.60.0"
    }
    talos = {
      source  = "siderolabs/talos"
      version = "0.5.0"
    }
  }
}

provider "proxmox" {
  endpoint = var.proxmox.endpoint
  insecure = var.proxmox.insecure

  api_token = var.proxmox.api_token
  ssh {
    agent    = true
    username = var.proxmox.username
  }
}

provider "kubernetes" {
  host                   = module.talos.talos_kube_config.kubernetes_client_configuration.host
  client_certificate     = base64decode(module.talos.talos_kube_config.kubernetes_client_configuration.client_certificate)
  client_key             = base64decode(module.talos.talos_kube_config.kubernetes_client_configuration.client_key)
  cluster_ca_certificate = base64decode(module.talos.talos_kube_config.kubernetes_client_configuration.ca_certificate)
  # ignore_labels = [
  #   "app.kubernetes.io/.*",
  #   "kustomize.toolkit.fluxcd.io/.*",
  # ]
}
@@ -1,9 +1,10 @@
+# Download the Talos image to each distinct Proxmox node
 resource "proxmox_virtual_environment_download_file" "talos_nocloud_image" {
-  for_each = toset(var.host_machines)
+  for_each = toset(distinct([for k, v in var.cluster_config.nodes : v.host_node]))

   node_name    = each.key
   content_type = "iso"
-  datastore_id = var.proxmox_node.image_datastore
+  datastore_id = var.talos_image.datastore

   file_name = "talos-${var.talos_image.version}-nocloud-amd64.img"
   url       = "${var.talos_image.base_url}/${var.talos_image.version}/nocloud-amd64.raw.gz"
29 remodel/tofu/kubernetes/talos/machine-config/control-plane.yaml.tftpl Normal file
@@ -0,0 +1,29 @@
machine:
  network:
    hostname: ${hostname}
  nodeLabels:
    topology.kubernetes.io/region: ${cluster_name}
    topology.kubernetes.io/zone: ${node_name}

cluster:
  allowSchedulingOnControlPlanes: true
  network:
    cni:
      name: none
  proxy:
    disabled: true
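  # Applied by Talos once the control plane is up: first the Helm values as a
  # ConfigMap, then the bootstrap manifest that consumes them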
  inlineManifests:
    - name: cilium-values
      contents: |
        ---
        apiVersion: v1
        kind: ConfigMap
        metadata:
          name: cilium-values
          namespace: kube-system
        data:
          values.yaml: |-
            ${indent(10, cilium_values)}
    - name: cilium-bootstrap
      contents: |
        ${indent(6, cilium_install)}
6 remodel/tofu/kubernetes/talos/machine-config/worker.yaml.tftpl Normal file
@@ -0,0 +1,6 @@
machine:
  network:
    hostname: ${hostname}
  nodeLabels:
    topology.kubernetes.io/region: ${cluster_name}
    topology.kubernetes.io/zone: ${node_name}
13 remodel/tofu/kubernetes/talos/output.tf Normal file
@@ -0,0 +1,13 @@
output "talos_machine_config" {
  value = data.talos_machine_configuration.machine_configuration
}

output "talos_client_configuration" {
  value     = data.talos_client_configuration.talos_config
  sensitive = true
}

output "talos_kube_config" {
  value     = data.talos_cluster_kubeconfig.kubeconfig
  sensitive = true
}
12 remodel/tofu/kubernetes/talos/providers.tf Normal file
@@ -0,0 +1,12 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = ">=0.60.0"
    }
    talos = {
      source  = "siderolabs/talos"
      version = ">=0.5.0"
    }
  }
}
65 remodel/tofu/kubernetes/talos/talos-config.tf Normal file
@@ -0,0 +1,65 @@
resource "talos_machine_secrets" "machine_secrets" {
  talos_version = var.cluster_config.talos_version
}

data "talos_client_configuration" "talos_config" {
  cluster_name         = var.cluster_config.cluster_name
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  endpoints            = [for k, v in var.cluster_config.nodes : v.ip if v.machine_type == "controlplane"]
}

data "talos_machine_configuration" "machine_configuration" {
  for_each         = var.cluster_config.nodes
  cluster_name     = var.cluster_config.cluster_name
  cluster_endpoint = var.cluster_config.endpoint
  machine_type     = each.value.machine_type
  machine_secrets  = talos_machine_secrets.machine_secrets.machine_secrets
  talos_version    = var.cluster_config.talos_version
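  # Control-plane nodes get the Cilium bootstrap patch; workers only get the
  # hostname and topology labels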
  config_patches = each.value.machine_type == "controlplane" ? [
    templatefile("${path.module}/machine-config/control-plane.yaml.tftpl", {
      hostname       = each.key
      cluster_name   = var.cluster_config.proxmox_cluster
      node_name      = each.value.host_node
      cilium_values  = var.cilium.values
      cilium_install = var.cilium.install
    })
    ] : [
    templatefile("${path.module}/machine-config/worker.yaml.tftpl", {
      hostname     = each.key
      cluster_name = var.cluster_config.proxmox_cluster
      node_name    = each.value.host_node
    })
  ]
}

resource "talos_machine_configuration_apply" "talos_config_apply" {
  depends_on                  = [proxmox_virtual_environment_vm.talos_vm]
  for_each                    = var.cluster_config.nodes
  node                        = each.value.ip
  client_configuration        = talos_machine_secrets.machine_secrets.client_configuration
  machine_configuration_input = data.talos_machine_configuration.machine_configuration[each.key].machine_configuration
}

resource "talos_machine_bootstrap" "talos_bootstrap" {
  depends_on           = [talos_machine_configuration_apply.talos_config_apply]
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  node                 = [for k, v in var.cluster_config.nodes : v.ip if v.machine_type == "controlplane"][0]
}

data "talos_cluster_health" "health" {
  depends_on           = [talos_machine_bootstrap.talos_bootstrap]
  client_configuration = data.talos_client_configuration.talos_config.client_configuration
  control_plane_nodes  = [for k, v in var.cluster_config.nodes : v.ip if v.machine_type == "controlplane"]
  worker_nodes         = [for k, v in var.cluster_config.nodes : v.ip if v.machine_type == "worker"]
  endpoints            = data.talos_client_configuration.talos_config.endpoints
  timeouts = {
    read = "10m"
  }
}

data "talos_cluster_kubeconfig" "kubeconfig" {
  #depends_on           = [talos_machine_bootstrap.talos_bootstrap]
  depends_on           = [talos_machine_bootstrap.talos_bootstrap, data.talos_cluster_health.health]
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  node                 = [for k, v in var.cluster_config.nodes : v.ip if v.machine_type == "controlplane"][0]
}
36 remodel/tofu/kubernetes/talos/variables.tf Normal file
@@ -0,0 +1,36 @@
variable "talos_image" {
  type = object({
    base_url  = string
    version   = string
    datastore = string
  })
}

variable "cluster_config" {
  description = "Talos node configuration"
  type = object({

    cluster_name    = string
    proxmox_cluster = string
    endpoint        = string
    talos_version   = string

    nodes = map(object({
      host_node     = string
      machine_type  = string
      ip            = string
      mac_address   = string
      vm_id         = number
      cpu           = number
      ram_dedicated = number
      igpu          = optional(bool, false)
    }))
  })
}

variable "cilium" {
  type = object({
    values  = string
    install = string
  })
}
@@ -1,11 +1,11 @@
-resource "proxmox_virtual_environment_vm" "controlplane" {
-  for_each = var.node_data.controlplanes
+resource "proxmox_virtual_environment_vm" "talos_vm" {
+  for_each = var.cluster_config.nodes

   node_name = each.value.host_node

   name        = each.key
-  description = "Talos Kubernetes Control Plane"
-  tags        = ["k8s", "control-plane"]
+  description = each.value.machine_type == "controlplane" ? "Talos Control Plane" : "Talos Worker"
+  tags        = each.value.machine_type == "controlplane" ? ["k8s", "control-plane"] : ["k8s", "worker"]
   on_boot     = true
   vm_id       = each.value.vm_id

@@ -51,7 +51,6 @@ resource "proxmox_virtual_environment_vm" "controlplane" {

   initialization {
     datastore_id = "local-zfs"
-    # meta_data_file_id = proxmox_virtual_environment_file.controlplane-config[each.key].id
     ip_config {
       ipv4 {
         address = "${each.value.ip}/24"
41 remodel/tofu/kubernetes/variables.tf Normal file
@@ -0,0 +1,41 @@
variable "proxmox" {
  type = object({
    name         = string
    cluster_name = string
    endpoint     = string
    insecure     = bool
    username     = string
    api_token    = string
  })
  sensitive = true
}

variable "talos_image" {
  type = object({
    base_url  = string
    version   = string
    datastore = string
  })
}

variable "cluster_config" {
  description = "Talos node configuration"
  type = object({

    cluster_name    = string
    proxmox_cluster = string
    endpoint        = string
    talos_version   = string

    nodes = map(object({
      host_node     = string
      machine_type  = string
      ip            = string
      mac_address   = string
      vm_id         = number
      cpu           = number
      ram_dedicated = number
      igpu          = optional(bool, false)
    }))
  })
}
@@ -1,11 +0,0 @@
machine:
  network:
    hostname: ${hostname}
cluster:
  allowSchedulingOnControlPlanes: true
  network:
    cni:
      name: none
  proxy:
    disabled: true
${inlineManifests}
@@ -1,7 +0,0 @@
machine:
  customization:
    systemExtensions:
      officialExtensions:
        - siderolabs/i915-ucode
        - siderolabs/intel-ucode
        - siderolabs/qemu-guest-agent
@@ -1,3 +0,0 @@
machine:
  network:
    hostname: ${hostname}
@@ -1,45 +0,0 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "0.60.0"
    }
    talos = {
      source  = "siderolabs/talos"
      version = "0.5.0"
    }
  }
}

provider "proxmox" {
  endpoint = var.proxmox_node.endpoint
  insecure = var.proxmox_node.insecure

  api_token = var.proxmox_node.api_token
  ssh {
    agent    = true
    username = var.proxmox_node.username
  }
}

output "talosconfig" {
  value     = data.talos_client_configuration.talosconfig.talos_config
  sensitive = true
}

output "kubeconfig" {
  value     = data.talos_cluster_kubeconfig.kubeconfig
  sensitive = true
}

resource "local_file" "talos-config" {
  content         = data.talos_client_configuration.talosconfig.talos_config
  filename        = "output/talos-config.yaml"
  file_permission = "0600"
}

resource "local_file" "kube-config" {
  content         = data.talos_cluster_kubeconfig.kubeconfig.kubeconfig_raw
  filename        = "output/kube-config.yaml"
  file_permission = "0600"
}
@@ -1,19 +0,0 @@
resource "proxmox_virtual_environment_role" "csi" {
  role_id = "csi"
  privileges = [
    "VM.Audit",
    "VM.Config.Disk",
    "Datastore.Allocate",
    "Datastore.AllocateSpace",
    "Datastore.Audit"
  ]
}

resource "proxmox_virtual_environment_user" "kubernetes-csi" {
  user_id = "kubernetes-csi@pve"
  acl {
    path      = "/"
    propagate = true
    role_id   = proxmox_virtual_environment_role.csi.role_id
  }
}
@@ -1,86 +0,0 @@
resource "talos_machine_secrets" "machine_secrets" {
  talos_version = var.cluster.talos_version
}

data "talos_client_configuration" "talosconfig" {
  cluster_name         = var.cluster.name
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  endpoints            = [for k, v in var.node_data.controlplanes : v.ip]
}

data "talos_machine_configuration" "control-plane" {
  for_each         = var.node_data.controlplanes
  cluster_name     = var.cluster.name
  cluster_endpoint = var.cluster.endpoint
  machine_type     = "controlplane"
  machine_secrets  = talos_machine_secrets.machine_secrets.machine_secrets
  talos_version    = var.cluster.talos_version
  config_patches = [
    templatefile("${path.module}/machine-config/control-plane.yaml.tftpl", {
      hostname = each.key
      inlineManifests = indent(2,
        yamlencode(
          {
            inlineManifests : [
              {
                name : "cilium-bootstrap",
                contents : file("${path.module}/bootstrap/cilium-install.yaml")
              }
            ]
          }))
    })
  ]
}

resource "talos_machine_configuration_apply" "ctrl_config_apply" {
  depends_on                  = [proxmox_virtual_environment_vm.controlplane]
  for_each                    = var.node_data.controlplanes
  node                        = each.value.ip
  client_configuration        = talos_machine_secrets.machine_secrets.client_configuration
  machine_configuration_input = data.talos_machine_configuration.control-plane[each.key].machine_configuration
}

data "talos_machine_configuration" "worker" {
  for_each         = var.node_data.workers
  cluster_name     = var.cluster.name
  cluster_endpoint = var.cluster.endpoint
  machine_type     = "worker"
  machine_secrets  = talos_machine_secrets.machine_secrets.machine_secrets
  talos_version    = var.cluster.talos_version
  config_patches = [
    templatefile("${path.module}/machine-config/worker.yaml.tftpl", {
      hostname = each.key
    })
  ]
}

resource "talos_machine_configuration_apply" "worker_config_apply" {
  depends_on                  = [proxmox_virtual_environment_vm.workers]
  for_each                    = var.node_data.workers
  node                        = each.value.ip
  client_configuration        = talos_machine_secrets.machine_secrets.client_configuration
  machine_configuration_input = data.talos_machine_configuration.worker[each.key].machine_configuration
}

resource "talos_machine_bootstrap" "bootstrap" {
  depends_on           = [talos_machine_configuration_apply.ctrl_config_apply]
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  node                 = [for k, v in var.node_data.controlplanes : v.ip][0]
}

data "talos_cluster_health" "health" {
  depends_on           = [talos_machine_configuration_apply.ctrl_config_apply]
  client_configuration = data.talos_client_configuration.talosconfig.client_configuration
  control_plane_nodes  = [for k, v in var.node_data.controlplanes : v.ip]
  worker_nodes         = [for k, v in var.node_data.workers : v.ip]
  endpoints            = data.talos_client_configuration.talosconfig.endpoints
  timeouts = {
    read = "10m"
  }
}

data "talos_cluster_kubeconfig" "kubeconfig" {
  depends_on           = [talos_machine_bootstrap.bootstrap, data.talos_cluster_health.health]
  client_configuration = talos_machine_secrets.machine_secrets.client_configuration
  node                 = [for k, v in var.node_data.controlplanes : v.ip][0]
}
@@ -1,53 +0,0 @@
variable "proxmox_node" {
  type = object({
    name            = string
    endpoint        = string
    insecure        = bool
    username        = string
    api_token       = string
    image_datastore = string
  })
  sensitive = true
}

variable "talos_image" {
  type = object({
    base_url = string
    version  = string
  })
}

variable "host_machines" {
  type = list(string)
}

variable "cluster" {
  type = object({
    name          = string
    endpoint      = string
    talos_version = string
  })
}

variable "node_data" {
  description = "A map of node data"
  type = object({
    controlplanes = map(object({
      ip            = string
      mac_address   = string
      host_node     = string
      vm_id         = number
      cpu           = number
      ram_dedicated = number
      igpu          = optional(bool, false)
    }))
    workers = map(object({
      ip            = string
      mac_address   = string
      host_node     = string
      vm_id         = number
      cpu           = number
      ram_dedicated = number
    }))
  })
}
@@ -1,75 +0,0 @@
resource "proxmox_virtual_environment_vm" "workers" {
  for_each = var.node_data.workers

  node_name = each.value.host_node

  name        = each.key
  description = "Talos Kubernetes Worker"
  tags        = ["k8s", "worker"]
  on_boot     = true
  vm_id       = each.value.vm_id

  machine       = "q35"
  scsi_hardware = "virtio-scsi-single"
  bios          = "seabios"

  agent {
    enabled = true
  }

  cpu {
    cores = each.value.cpu
    type  = "host"
  }

  memory {
    dedicated = each.value.ram_dedicated
  }

  network_device {
    bridge      = "vmbr0"
    mac_address = each.value.mac_address
  }

  disk {
    datastore_id = "local-zfs"
    interface    = "scsi0"
    iothread     = true
    cache        = "writethrough"
    discard      = "on"
    ssd          = true
    file_id      = proxmox_virtual_environment_download_file.talos_nocloud_image[each.value.host_node].id
    file_format  = "raw"
    size         = 20
  }

  boot_order = ["scsi0"]

  operating_system {
    type = "l26" # Linux Kernel 2.6 - 6.X.
  }

  initialization {
    datastore_id = "local-zfs"
    # meta_data_file_id = proxmox_virtual_environment_file.worker-config[each.key].id
    ip_config {
      ipv4 {
        address = "${each.value.ip}/24"
        gateway = "192.168.1.1"
      }
      ipv6 {
        address = "dhcp"
      }
    }
  }

  # hostpci {
  #   # Passthrough iGPU
  #   device  = "hostpci0"
  #   #id     = "0000:00:02"
  #   mapping = "iGPU"
  #   pcie    = true
  #   rombar  = true
  #   xvga    = false
  # }
}