add devcontainer configuration and update Proxmox setup with helm dependencies

Signed-off-by: Fabian Schurig <fabian.a.schurig@gmail.com>
Fabian Schurig
2025-03-21 13:40:45 +00:00
committed by Serge
parent 91cf55d8e0
commit 900f03f966
8 changed files with 116 additions and 12 deletions

.devcontainer/Dockerfile

@@ -0,0 +1,10 @@
FROM mcr.microsoft.com/devcontainers/base:ubuntu-24.04

# Install age-keygen
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install --no-install-recommends age \
    && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/*

# Install yq
RUN wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && \
    chmod +x /usr/bin/yq
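Once the image builds, a quick sanity check from inside the container confirms both tools are on the PATH (a hypothetical session):
```shell
age --version   # the age package also ships age-keygen
yq --version
```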

.devcontainer/devcontainer.json

@@ -0,0 +1,57 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
  "name": "Ubuntu 24.04",
  // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
  "build": {
    "dockerfile": "Dockerfile",
    "context": ".."
  },
  "runArgs": ["--network=host"],
  "features": {
    "ghcr.io/fabianschurig/devcontainer-features/oh-my-posh:latest": {
      "theme": "https://raw.githubusercontent.com/JanDeDobbeleer/oh-my-posh/refs/heads/main/themes/powerlevel10k_rainbow.omp.json"
    },
    "ghcr.io/devcontainers/features/kubectl-helm-minikube:1": {
      "minikube": "none"
    },
    "ghcr.io/devcontainers/features/terraform:1": {},
    "ghcr.io/devcontainers-extra/features/ansible:2": {},
    "ghcr.io/hspaans/devcontainer-features/ansible-lint:1": {},
    "ghcr.io/goldsam/dev-container-features/flux2:1": {},
    "ghcr.io/devcontainers-extra/features/talosctl:1": {},
    "ghcr.io/devcontainers-extra/features/sops:1": {}
  },
  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  // "forwardPorts": [],
  // Use 'postCreateCommand' to run commands after the container is created.
  "postCreateCommand": "",
  // Configure tool-specific properties.
  "customizations": {
    "vscode": {
      "settings": {
        "terminal.integrated.defaultProfile.linux": "zsh",
        "terminal.integrated.profiles.linux": {
          "zsh": {
            "path": "/usr/bin/zsh"
          }
        }
      },
      "extensions": [
        "github.copilot",
        "github.copilot-chat",
        "wholroyd.jinja",
        "eamodio.gitlens",
        "github.vscode-github-actions"
      ]
    }
  },
  // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
  // "remoteUser": "root"
  "containerEnv": {
  }
}
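To enter the environment, use the "Dev Containers: Reopen in Container" command in VS Code, or the devcontainer CLI if it is installed — a minimal sketch:
```shell
devcontainer up --workspace-folder .
devcontainer exec --workspace-folder . helm version
```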

Makefile

@@ -63,6 +63,12 @@ kubeconfig: ## Download kubeconfig
nodes: ## Show kubernetes nodes
	@kubectl get nodes -owide --sort-by '{.metadata.name}' --label-columns topology.kubernetes.io/region,topology.kubernetes.io/zone,node.kubernetes.io/instance-type

helm-install-deps:
	helm repo add cilium https://helm.cilium.io/
	helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
	helm plugin list | grep -q diff || helm plugin install https://github.com/databus23/helm-diff
	helm plugin list | grep -q secrets || helm plugin install https://github.com/jkroepke/helm-secrets

system:
	helm --kubeconfig=kubeconfig upgrade -i --namespace=kube-system --version=1.16.4 -f deployments/cilium.yaml \
		cilium cilium/cilium

README.md

@@ -42,10 +42,20 @@ Follow this link [README](images/README.md) to make it.
Create the Proxmox role and account.
These credentials will be used by the Proxmox CCM and CSI.
```shell
# On the proxmox host node
pveum user add terraform@pve
pveum role add Terraform -privs "Datastore.Allocate Datastore.AllocateSpace Datastore.AllocateTemplate Datastore.Audit Permissions.Modify Pool.Allocate Realm.AllocateUser SDN.Use Sys.Audit Sys.Console Sys.Modify User.Modify VM.Allocate VM.Audit VM.Clone VM.Config.CDROM VM.Config.CPU VM.Config.Cloudinit VM.Config.Disk VM.Config.HWType VM.Config.Memory VM.Config.Network VM.Config.Options VM.Migrate VM.Monitor VM.PowerMgmt"
pveum aclmod / -user terraform@pve -role Terraform
pveum user token add terraform@pve provider --privsep=0
```
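`pveum user token add` prints the token secret exactly once; the output looks roughly like this, with the `value` row holding the secret:
```shell
┌──────────────┬──────────────────────────────────────┐
│ key          │ value                                │
╞══════════════╪══════════════════════════════════════╡
│ full-tokenid │ terraform@pve!provider               │
├──────────────┼──────────────────────────────────────┤
│ info         │ {"privsep":"0"}                      │
├──────────────┼──────────────────────────────────────┤
│ value        │ xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx │
└──────────────┴──────────────────────────────────────┘
```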
Use the API token shown here when you are prompted during `terraform apply`, and again later in the `.env.yaml`.
```shell
cd init
terraform init -upgrade
-terraform apply
+terraform apply --var 'proxmox_host=192.168.178.XX' --var 'proxmox_token_id=terraform@pve!provider'
cd ..
```
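Since only `proxmox_host` and `proxmox_token_id` are passed on the command line, `terraform apply` prompts for the remaining token secret — this is where the API token value from above goes (a sketch of the prompt):
```shell
var.proxmox_token_secret
  Enter a value: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
```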
## Bootstrap cluster
@@ -94,6 +104,7 @@ machine:
First we need to define our cluster:
```hcl
# ~/terraform-talos/proxmox/terraform.tfvars
# Proxmox API host
proxmox_host = "node1.example.com"
```
@@ -167,6 +178,22 @@ make create-age
export SOPS_AGE_KEY_FILE=age.key.txt
```
Adjust the `.sops.yaml` file to use your newly generated public key from `age.key.txt`:
```text
# created: 2025-03-19T15:50:15+01:00
# public key: age1ngvggfld4elq68926uczkes9rcqfjhnqn0tr6l8avyp4h46qzucqvx3sdf
AGE-SECRET-KEY-<your-secret>
```
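A minimal `.sops.yaml` rule using that public key could look like this — the `path_regex` is an assumption about which files you want encrypted:
```yaml
creation_rules:
  - path_regex: \.env\.yaml$
    age: age1ngvggfld4elq68926uczkes9rcqfjhnqn0tr6l8avyp4h46qzucqvx3sdf
```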
Create the `.env.yaml` and encrypt it with sops.
```shell
cat > .env.yaml <<EOF
PROXMOX_VE_API_TOKEN: "terraform@pve!provider=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
EOF
sops --encrypt -i .env.yaml
```
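To verify the file round-trips, decrypting it should print the original plaintext:
```shell
sops --decrypt .env.yaml
```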
Create all configs:
@@ -189,6 +216,11 @@ Receive `kubeconfig` file
```shell
make kubeconfig
```
Install the required dependencies and plugins for helm:
```shell
make helm-install-deps
```
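If the target succeeded, the new repositories and plugins show up in helm:
```shell
helm repo list    # should include cilium and metrics-server
helm plugin list  # should include diff and secrets
```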
```shell
make system system-base
```
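Once the system charts are applied, the kube-system workloads should come up; a quick check (pod names depend on the charts):
```shell
kubectl --kubeconfig=kubeconfig get pods -n kube-system
```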


@@ -3,9 +3,9 @@ provider "proxmox" {
  endpoint  = "https://${var.proxmox_host}:8006/"
  insecure  = true
- # api_token = data.sops_file.envs.data["PROXMOX_VE_API_TOKEN"]
- username  = "root@pam"
- password  = data.sops_file.envs.data["PROXMOX_VE_PASSWORD"]
+ api_token = data.sops_file.envs.data["PROXMOX_VE_API_TOKEN"]
+ # username  = "root@pam"
+ # password  = data.sops_file.envs.data["PROXMOX_VE_PASSWORD"]
  ssh {
    username = "root"


@@ -3,6 +3,5 @@ provider "proxmox" {
  endpoint = "https://${var.proxmox_host}:8006/"
  insecure = true
- username  = var.proxmox_token_id
- password  = var.proxmox_token_secret
+ api_token = "${var.proxmox_token_id}=${var.proxmox_token_secret}"
}
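The interpolated `api_token` yields the same `user@realm!tokenid=secret` string that is stored in `.env.yaml`, for example:
```text
terraform@pve!provider=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
```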


@@ -62,7 +62,7 @@ resource "proxmox_virtual_environment_file" "worker_machineconfig" {
resource "proxmox_virtual_environment_file" "worker_metadata" {
for_each = local.workers
node_name = each.value.node_name
node_name = each.value.zone
content_type = "snippets"
datastore_id = "local"
@@ -82,7 +82,7 @@ resource "proxmox_virtual_environment_file" "worker_metadata" {
resource "proxmox_virtual_environment_vm" "worker" {
for_each = local.workers
name = each.value.name
node_name = each.value.node_name
node_name = each.value.zone
vm_id = each.value.id
description = "Talos worker node"
@@ -202,7 +202,7 @@ resource "proxmox_virtual_environment_vm" "worker" {
resource "proxmox_virtual_environment_firewall_options" "worker" {
for_each = lookup(var.security_groups, "worker", "") == "" ? {} : local.workers
node_name = each.value.node_name
node_name = each.value.zone
vm_id = each.value.id
enabled = true
@@ -221,7 +221,7 @@ resource "proxmox_virtual_environment_firewall_options" "worker" {
resource "proxmox_virtual_environment_firewall_rules" "worker" {
for_each = lookup(var.security_groups, "worker", "") == "" ? {} : local.workers
node_name = each.value.node_name
node_name = each.value.zone
vm_id = each.value.id
rule {

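These hunks switch VM placement from a dedicated `node_name` attribute to the worker's `zone`, so the zone now doubles as the Proxmox node name. A hypothetical `local.workers` entry under that assumption — every attribute besides `id`, `name`, and `zone` is illustrative:
```hcl
locals {
  workers = {
    "worker-1" = {
      id   = 1001        # Proxmox VM id
      name = "worker-1"  # VM name
      zone = "node1"     # Proxmox node hosting the VM; used as node_name above
    }
  }
}
```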

@@ -26,7 +26,7 @@ variable "vpc_main_cidr" {
variable "release" {
type = string
description = "The version of the Talos image"
default = "1.8.4"
default = "1.9.5"
}
data "sops_file" "tfvars" {
@@ -86,7 +86,7 @@ variable "instances" {
  type = map(any)
  default = {
    "all" = {
-     version = "v1.31.4"
+     version = "v1.32.3"
    },
    "hvm-1" = {
      enabled = false,