proxmox and cloud-init

Serge Logvinov
2023-04-03 17:57:27 +03:00
parent d1713f1f64
commit 6510482557
18 changed files with 380 additions and 119 deletions

View File

@@ -3,3 +3,9 @@
* https://console.cloud.google.com/apis/library/cloudresourcemanager.googleapis.com
* https://console.cloud.google.com/apis/library/compute.googleapis.com
* https://console.cloud.google.com/apis/library/servicenetworking.googleapis.com
```shell
gcloud auth login
gcloud auth application-default login
```

View File

@@ -18,7 +18,7 @@ create-lb: ## Create load balancer
terraform refresh
create-config: ## Generate talos configs
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-hetzner https://${ENDPOINT}:6443
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false ${CLUSTERNAME} https://${ENDPOINT}:6443
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
create-templates:

View File

@@ -1,19 +1,24 @@
ENDPOINT=192.168.10.10
CLUSTERNAME:="talos-k8s-proxmox"
help:
@awk 'BEGIN {FS = ":.*?## "} /^[0-9a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
create-config: ## Generate talos configs
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-proxmox https://${ENDPOINT}:6443
talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false --config-patch-worker @templates/worker.patch.yaml ${CLUSTERNAME} https://${ENDPOINT}:6443
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
create-templates:
@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
@echo 'nodeSubnets: "172.16.0.0/12"' >> _cfgs/tfstate.vars
@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.id' _cfgs/controlplane.yaml | awk '{ print "clusterID: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.secret' _cfgs/controlplane.yaml | awk '{ print "clusterSecret: "$$1}'>> _cfgs/tfstate.vars
@yq eval '.machine.token' _cfgs/controlplane.yaml | awk '{ print "tokenMachine: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.machine.ca.crt' _cfgs/controlplane.yaml | awk '{ print "caMachine: "$$1}' >> _cfgs/tfstate.vars
@yq eval '.cluster.token' _cfgs/controlplane.yaml | awk '{ print "token: "$$1}' >> _cfgs/tfstate.vars
@@ -21,5 +26,14 @@ create-templates:
@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json
create-kubeconfig:
talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} kubeconfig
create-controlplane-bootstrap:
talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 bootstrap
create-controlplane: ## Bootstrap first controlplane node
terraform apply -auto-approve -target=hcloud_server.controlplane
create-kubeconfig: ## Prepare kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.11 kubeconfig .
kubectl --kubeconfig=kubeconfig config set clusters.${CLUSTERNAME}.server https://${ENDPOINT}:6443
kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
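
One plausible end-to-end ordering of these targets — a sketch inferred from the target names above, not a documented workflow; adjust to your own layout:

```shell
# order inferred from the Makefile targets in this diff
make create-config                   # talosctl gen config + endpoint setup
make create-templates                # merge machine configs, emit terraform.tfvars.json
terraform init && terraform apply    # create the Proxmox VMs
make create-controlplane-bootstrap   # bootstrap etcd on the first controlplane node
make create-kubeconfig               # fetch kubeconfig and point it at the endpoint
```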

View File

@@ -1,7 +1,18 @@
# provider "proxmox" {
# virtual_environment {
# endpoint = "https://${var.proxmox_host}:8006/"
# insecure = true
# username = var.proxmox_token_id
# password = var.proxmox_token_secret
# }
# }
provider "proxmox" {
pm_api_url = "https://${var.proxmox_host}:8006/api2/json"
pm_api_token_id = var.proxmox_token_id
pm_api_token_secret = var.proxmox_token_secret
pm_tls_insecure = true
pm_debug = true
}
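
A hedged sketch of how the credentials above are typically supplied; the variable names come from the variables file in this diff, the token id follows the usual `user@realm!tokenname` form of the Telmate provider, and every value is a placeholder:

```shell
# hypothetical terraform.tfvars -- replace every value with your own
cat > terraform.tfvars <<'EOF'
proxmox_host         = "pve.example.com"
proxmox_token_id     = "terraform@pve!provider"
proxmox_token_secret = "00000000-0000-0000-0000-000000000000"
EOF
```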

View File

@@ -0,0 +1,78 @@
# resource "null_resource" "controlplane_machineconfig" {
# count = lookup(var.controlplane, "count", 0)
# connection {
# type = "ssh"
# user = "root"
# host = var.proxmox_host
# }
# provisioner "file" {
# content = templatefile("${path.module}/templates/controlplane.yaml",
# merge(var.kubernetes, {
# name = "controlplane-${count.index + 1}"
# type = "controlplane"
# ipv4_local = "192.168.10.11"
# ipv4_vip = "192.168.10.10"
# nodeSubnets = "${var.vpc_main_cidr}"
# })
# )
# destination = "/var/lib/vz/snippets/controlplane-${count.index + 1}.yml"
# }
# }
# resource "proxmox_vm_qemu" "controlplane" {
# count = lookup(var.controlplane, "count", 0)
# name = "controlplane-${count.index + 1}"
# target_node = var.proxmox_nodename
# clone = var.proxmox_image
# # preprovision = false
# define_connection_info = false
# os_type = "ubuntu"
# ipconfig0 = "ip=${cidrhost(var.vpc_main_cidr, 11 + count.index)}/24,gw=${local.gwv4}"
# cicustom = "user=local:snippets/controlplane-${count.index + 1}.yml"
# cloudinit_cdrom_storage = var.proxmox_storage
# onboot = false
# cpu = "host,flags=+aes"
# cores = 2
# sockets = 1
# memory = 2048
# scsihw = "virtio-scsi-pci"
# vga {
# memory = 0
# type = "serial0"
# }
# serial {
# id = 0
# type = "socket"
# }
# network {
# model = "virtio"
# bridge = var.proxmox_bridge
# firewall = false
# }
# boot = "order=scsi0"
# disk {
# type = "scsi"
# storage = var.proxmox_storage
# size = "16G"
# cache = "writethrough"
# ssd = 1
# backup = 0
# }
# lifecycle {
# ignore_changes = [
# desc,
# define_connection_info,
# ]
# }
# depends_on = [null_resource.controlplane_machineconfig]
# }

View File

@@ -1,78 +0,0 @@
resource "null_resource" "controlplane_machineconfig" {
count = lookup(var.controlplane, "count", 0)
connection {
type = "ssh"
user = "root"
host = var.proxmox_host
}
provisioner "file" {
content = templatefile("${path.module}/templates/controlplane.yaml",
merge(var.kubernetes, {
name = "master-${count.index + 1}"
type = "controlplane"
ipv4_local = "192.168.10.11"
ipv4_vip = "192.168.10.10"
nodeSubnets = "${var.vpc_main_cidr}"
})
)
destination = "/var/lib/vz/snippets/master-${count.index + 1}.yml"
}
}
resource "proxmox_vm_qemu" "controlplane" {
count = lookup(var.controlplane, "count", 0)
name = "master-${count.index + 1}"
target_node = var.proxmox_nodename
clone = var.proxmox_image
# preprovision = false
define_connection_info = false
os_type = "ubuntu"
ipconfig0 = "ip=${cidrhost(var.vpc_main_cidr, 11 + count.index)}/24,gw=${local.gwv4}"
cicustom = "user=local:snippets/master-${count.index + 1}.yml"
cloudinit_cdrom_storage = var.proxmox_storage
onboot = false
cpu = "host,flags=+aes"
cores = 2
sockets = 1
memory = 2048
scsihw = "virtio-scsi-pci"
vga {
memory = 0
type = "serial0"
}
serial {
id = 0
type = "socket"
}
network {
model = "virtio"
bridge = var.proxmox_bridge
firewall = false
}
boot = "order=scsi0"
disk {
type = "scsi"
storage = var.proxmox_storage
size = "16G"
cache = "writethrough"
ssd = 1
backup = 0
}
lifecycle {
ignore_changes = [
desc,
define_connection_info,
]
}
depends_on = [null_resource.controlplane_machineconfig]
}

View File

@@ -1,37 +1,165 @@
locals {
worker_prefix = "worker"
zones = [for k, v in var.instances : k]
workers = { for k in flatten([
for zone in local.zones : [
for inx in range(lookup(try(var.instances[zone], {}), "worker_count", 0)) : {
id : lookup(try(var.instances[zone], {}), "worker_id", 9000) + inx
name : "${local.worker_prefix}-${lower(substr(zone, -1, -1))}${1 + inx}"
zone : zone
node_name : zone
cpu : lookup(try(var.instances[zone], {}), "worker_cpu", 1)
mem : lookup(try(var.instances[zone], {}), "worker_mem", 2048)
ipv4 : "${cidrhost(var.vpc_main_cidr, 81 + inx)}/24"
gwv4 : local.gwv4
}
]
]) : k.name => k }
}
output "workers" {
value = local.workers
}
resource "null_resource" "worker_machineconfig" {
count = lookup(var.worker, "count", 0)
for_each = var.instances
connection {
type = "ssh"
user = "root"
host = var.proxmox_host
host = "${each.key}.${var.proxmox_domain}"
}
provisioner "file" {
# content = file("init.yaml")
source = "worker.yaml"
destination = "/var/lib/vz/snippets/worker-${count.index + 1}.yml"
source = "${path.module}/_cfgs/worker.yaml"
destination = "/var/lib/vz/snippets/${local.worker_prefix}.yaml"
}
triggers = {
params = filemd5("${path.module}/_cfgs/worker.yaml")
}
}
resource "null_resource" "worker_metadata" {
for_each = local.workers
connection {
type = "ssh"
user = "root"
host = "${each.value.node_name}.${var.proxmox_domain}"
}
provisioner "file" {
content = templatefile("${path.module}/templates/metadata.yaml", {
hostname : each.value.name,
id : each.value.id,
type : "qemu",
zone : each.value.zone,
region : var.region,
})
destination = "/var/lib/vz/snippets/${each.value.name}.metadata.yaml"
}
triggers = {
params = join(",", [for k, v in local.workers[each.key] : "${k}-${v}"])
}
}
# resource "proxmox_virtual_environment_vm" "talos" {
# for_each = local.workers
# name = each.value.name
# tags = ["talos"]
# node_name = each.value.node_name
# vm_id = each.value.id
# initialization {
# datastore_id = "local"
# ip_config {
# ipv6 {
# address = "slaac"
# # gateway = ""
# }
# }
# ip_config {
# ipv4 {
# address = "2.3.4.5/24"
# }
# }
# user_data_file_id = ""
# }
# clone {
# vm_id = 102
# datastore_id = var.proxmox_storage
# }
# disk {
# datastore_id = var.proxmox_storage
# interface = "scsi0"
# ssd = true
# size = 32
# file_format = "raw"
# }
# cpu {
# cores = each.value.cpu
# sockets = 1
# type = "host"
# flags = ["+aes"]
# }
# memory {
# dedicated = each.value.mem
# }
# network_device {
# model = "virtio"
# bridge = "vmbr0"
# # firewall = true
# }
# network_device {
# model = "virtio"
# bridge = "vmbr1"
# }
# operating_system {
# type = "l26"
# }
# agent {
# enabled = false
# }
# serial_device {}
# lifecycle {
# ignore_changes = [
# tags,
# cpu,
# memory,
# network_device,
# ]
# }
# depends_on = [null_resource.worker_machineconfig, null_resource.worker_metadata]
# }
resource "proxmox_vm_qemu" "worker" {
count = lookup(var.worker, "count", 0)
name = "worker-${count.index + 1}"
target_node = var.proxmox_nodename
for_each = local.workers
name = each.value.name
vmid = each.value.id
target_node = each.value.node_name
clone = var.proxmox_image
# preprovision = false
agent = 0
define_connection_info = false
os_type = "ubuntu"
ipconfig0 = "ip=${cidrhost(var.vpc_main_cidr, 21 + count.index)}/24,gw=${local.gwv4}"
cicustom = "user=local:snippets/worker-${count.index + 1}.yml"
qemu_os = "l26"
ipconfig0 = "ip6=auto"
ipconfig1 = "ip=${each.value.ipv4},gw=${each.value.gwv4}"
cicustom = "user=local:snippets/${local.worker_prefix}.yaml,meta=local:snippets/${each.value.name}.metadata.yaml"
cloudinit_cdrom_storage = var.proxmox_storage
onboot = false
cpu = "host,flags=+aes"
cores = 1
sockets = 1
memory = 1024
cores = each.value.cpu
memory = each.value.mem
scsihw = "virtio-scsi-pci"
vga {
@@ -45,27 +173,36 @@ resource "proxmox_vm_qemu" "worker" {
network {
model = "virtio"
bridge = var.proxmox_bridge
bridge = "vmbr0"
firewall = true
}
network {
model = "virtio"
bridge = "vmbr1"
}
boot = "order=scsi0"
disk {
type = "scsi"
storage = var.proxmox_storage
size = "16G"
size = "32G"
cache = "writethrough"
ssd = 1
backup = 0
backup = false
}
lifecycle {
ignore_changes = [
boot,
network,
desc,
numa,
agent,
ipconfig0,
ipconfig1,
define_connection_info,
]
}
depends_on = [null_resource.worker_machineconfig]
depends_on = [null_resource.worker_machineconfig, null_resource.worker_metadata]
}

View File

@@ -1,5 +1,5 @@
locals {
gwv4 = cidrhost(var.vpc_main_cidr, -3)
gwv4 = cidrhost(var.vpc_main_cidr, 1)
lbv4_local = cidrhost(var.vpc_main_cidr, 10)
}

View File

@@ -11,6 +11,7 @@ Apply a few changes to the proxmox node.
ansible-galaxy role install git+https://github.com/sergelogvinov/ansible-role-system.git,main
ansible-galaxy role install git+https://github.com/sergelogvinov/ansible-role-users.git,main
ansible-galaxy role install git+https://github.com/sergelogvinov/ansible-role-iptables.git,main
ansible-galaxy role install git+https://github.com/sergelogvinov/ansible-role-dnsmasq.git,main
```
2. Update the inventory file, replacing the host IP in `ansible_host`
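
The relevant inventory entry looks roughly like this — a sketch assuming a YAML inventory; the group and host names are placeholders:

```shell
# hypothetical inventory -- replace node1 and the IP with your Proxmox node
cat > inventory.yaml <<'EOF'
all:
  children:
    proxmox:
      hosts:
        node1:
          ansible_host: 192.168.10.5
EOF
```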

View File

@@ -15,6 +15,11 @@ system_sysctl:
#
dnsmasq_configs: ["proxmox"]
dnsmasq_interfaces: ["vmbr0"]
#
iptables_apply_changes: false
iptables_configuration_template: iptables_proxmox.j2
iptables6_configuration_template: iptables6_proxmox.j2

View File

@@ -4,3 +4,4 @@
- ansible-role-system
- ansible-role-users
- ansible-role-iptables
- ansible-role-dnsmasq

View File

@@ -0,0 +1,5 @@
hostname: ${hostname}
instance-id: ${id}
instance-type: ${type}
region: ${region}
zone: ${zone}

View File

@@ -0,0 +1,23 @@
machine:
kubelet:
extraArgs:
cloud-provider: external
rotate-server-certificates: true
clusterDNS:
- 169.254.2.53
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
network:
interfaces:
- interface: dummy0
addresses:
- 169.254.2.53/32
extraHostEntries:
- ip: ${lbv4}
aliases:
- ${apiDomain}
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
cluster:
proxy:
disabled: true

View File

@@ -1,4 +1,10 @@
variable "proxmox_domain" {
description = "Proxmox host"
type = string
default = "example.com"
}
variable "proxmox_host" {
description = "Proxmox host"
type = string
@@ -13,6 +19,7 @@ variable "proxmox_nodename" {
variable "proxmox_image" {
description = "Proxmox source image name"
type = string
default = "talos"
}
variable "proxmox_storage" {
@@ -20,11 +27,6 @@ variable "proxmox_storage" {
type = string
}
variable "proxmox_bridge" {
description = "Proxmox bridge name"
type = string
}
variable "proxmox_token_id" {
description = "Proxmox token id"
type = string
@@ -35,6 +37,12 @@ variable "proxmox_token_secret" {
type = string
}
variable "region" {
description = "Proxmox host"
type = string
default = "nova"
}
variable "kubernetes" {
type = map(string)
default = {
@@ -65,10 +73,39 @@ variable "controlplane" {
}
}
variable "worker" {
description = "Property of worker"
variable "instances" {
description = "Map of region's properties"
type = map(any)
default = {
count = 0,
"node1" = {
web_id = 1000
web_count = 0,
web_cpu = 2,
web_mem = 4096,
worker_id = 1050
worker_count = 0,
worker_cpu = 2,
worker_mem = 4096,
},
"node2" = {
web_id = 2000
web_count = 0,
web_cpu = 2,
web_mem = 4096,
worker_id = 2050
worker_count = 0,
worker_cpu = 2,
worker_mem = 4096,
}
"node3" = {
web_id = 3000
web_count = 0,
web_cpu = 2,
web_mem = 4096,
worker_id = 3050
worker_count = 0,
worker_cpu = 2,
worker_mem = 4096,
}
}
}

View File

@@ -2,8 +2,12 @@ terraform {
required_providers {
proxmox = {
source = "Telmate/proxmox"
version = "~> 2.7.4"
version = "~> 2.9.14"
}
# proxmox = {
# source = "bpg/proxmox"
# version = "0.17.0-rc1"
# }
}
required_version = ">= 1.0"
}

View File

@@ -0,0 +1,2 @@
* https://developer.hashicorp.com/packer/plugins/builders/proxmox/iso
* https://github.com/Telmate/proxmox-api-go
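
A hedged build sketch, assuming the template files sit in the current directory and using the variable names from this diff; the host, node, and storage values are placeholders, and the Proxmox API credentials still have to be configured for the builder:

```shell
# install the plugin pinned in required_plugins, then build the Talos template
packer init .
packer build \
  -var proxmox_host=pve.example.com \
  -var proxmox_nodename=pve \
  -var proxmox_storage=local-lvm \
  .
```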

View File

@@ -2,7 +2,7 @@
packer {
required_plugins {
proxmox = {
version = ">= 1.0.1"
version = ">= 1.1.2"
source = "github.com/hashicorp/proxmox"
}
}
@@ -15,15 +15,20 @@ source "proxmox" "talos" {
node = var.proxmox_nodename
insecure_skip_tls_verify = true
iso_file = "local:iso/archlinux-2021.10.01-x86_64.iso"
# iso_url = "https://mirror.rackspace.com/archlinux/iso/2021.10.01/archlinux-2021.10.01-x86_64.iso"
# iso_checksum = "sha1:77a20dcd9d838398cebb2c7c15f46946bdc3855e"
iso_file = "local:iso/archlinux-2023.03.01-x86_64.iso"
# iso_url = "https://mirror.rackspace.com/archlinux/iso/2023.03.01/archlinux-2023.03.01-x86_64.iso"
# iso_checksum = "sha1:3ae7c83eca8bd698b4e54c49d43e8de5dc8a4456"
# iso_storage_pool = "local"
unmount_iso = true
scsi_controller = "virtio-scsi-pci"
network_adapters {
bridge = "vmbr0"
bridge = "vmbr0"
model = "virtio"
firewall = true
}
network_adapters {
bridge = "vmbr1"
model = "virtio"
}
disks {
@@ -31,18 +36,28 @@ source "proxmox" "talos" {
storage_pool = var.proxmox_storage
storage_pool_type = var.proxmox_storage_type
format = "raw"
disk_size = "1G"
disk_size = "5G"
cache_mode = "writethrough"
}
memory = 2048
cpu_type = "host"
memory = 3072
vga {
type = "serial0"
}
serials = ["socket"]
ssh_username = "root"
ssh_password = "packer"
ssh_timeout = "15m"
qemu_agent = true
ssh_bastion_host = var.proxmox_host
ssh_bastion_username = "root"
ssh_bastion_agent_auth = true
template_name = "talos"
template_description = "Talos system disk"
template_description = "Talos system disk, version ${var.talos_version}"
boot_wait = "15s"
boot_command = [
@@ -68,7 +83,7 @@ build {
sources = ["source.proxmox.talos"]
provisioner "file" {
source = "../../../talos/_out/nocloud-amd64.raw.xz"
source = "nocloud-amd64.raw.xz"
destination = "/tmp/talos.raw.xz"
}
provisioner "shell" {

View File

@@ -25,7 +25,7 @@ variable "proxmox_storage_type" {
variable "talos_version" {
type = string
default = "v1.3.3"
default = "v1.3.6"
}
locals {