mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-11-01 18:58:39 +00:00)

Commit: add cpuaffinity module
@@ -101,13 +101,13 @@ vpc_main_cidr = "172.16.0.0/24"
 # We will create one control-plane node on the Proxmox node `node1` (count = 1)
 controlplane = {
   "node1" = {
-    id    = 500
+    id    = 500,
     count = 1,
     cpu   = 2,
     mem   = 6144,
   },
   "node2" = {
-    id    = 520
+    id    = 520,
     count = 0,
     cpu   = 2,
     mem   = 6144,
@@ -117,21 +117,23 @@ controlplane = {
 # One web and worker node:
 instances = {
   "node1" = {
-    web_id       = 1000
+    enabled      = true,
+    web_id       = 1000,
     web_count    = 1,
     web_cpu      = 2,
     web_mem      = 4096,
-    worker_id    = 1050
+    worker_id    = 1050,
     worker_count = 1,
     worker_cpu   = 2,
     worker_mem   = 4096,
   },
   "node2" = {
-    web_id       = 2000
+    enabled      = true,
+    web_id       = 2000,
     web_count    = 0,
     web_cpu      = 2,
     web_mem      = 4096,
-    worker_id    = 2050
+    worker_id    = 2050,
     worker_count = 0,
     worker_cpu   = 2,
     worker_mem   = 4096,
@@ -159,9 +161,14 @@ Receive `kubeconfig` file
 make kubeconfig
 ```

+Test the cluster
+
 ```shell
+export KUBECONFIG=kubeconfig
+
 kubectl get nodes -o wide
 kubectl get pods -o wide -A
+kubectl get csistoragecapacities -ocustom-columns=CLASS:.storageClassName,AVAIL:.capacity,ZONE:.nodeTopology.matchLabels -A
 ```

 Resault:
proxmox/cpuaffinity/outputs.tf (new file, 8 lines)
@@ -0,0 +1,8 @@
+
+output "arch" {
+  value = { for k, v in local.vm_arch : k => {
+    cpus : v.cpus
+    numa : { for numa in range(length(var.cpu_affinity)) : numa => v.numa[numa] if length(v.numa[numa]) > 0 }
+    }
+  }
+}
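Taken together with the locals in variables.tf below, this exports one entry per VM. A sketch of the output's shape under the module's default inputs (my own illustration, not part of the commit):

```hcl
# arch = {
#   0 = { cpus = [0, 64, 1, 65, ..., 7, 71],  numa = { 0 = [0, 1, ..., 7, 64, ..., 71] } }
#   1 = { cpus = [8, 72, 9, 73, ..., 15, 79], numa = { 0 = [8, 9, ..., 15, 72, ..., 79] } }
# }
# Host NUMA nodes whose intersection with the VM's vCPU slice is empty are
# dropped by the `if length(v.numa[numa]) > 0` clause above.
```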
proxmox/cpuaffinity/variables.tf (new file, 41 lines)
@@ -0,0 +1,41 @@
+
+variable "cpu_affinity" {
+  description = "CPU numa affinity list"
+  type        = list(string)
+  default     = ["0-15,64-79", "16-31,80-95", "32-47,96-111", "48-63,112-127"]
+}
+
+variable "vms" {
+  type    = number
+  default = 2
+}
+
+variable "cpus" {
+  type    = number
+  default = 16
+}
+
+variable "shift" {
+  type    = number
+  default = 0
+}
+
+locals {
+  server_cpus = [for i in var.cpu_affinity :
+    flatten([for r in split(",", i) : (strcontains(r, "-") ? range(split("-", r)[0], split("-", r)[1] + 1, 1) : [r])])
+  ]
+
+  cpus = [for k, v in local.server_cpus :
+    flatten([flatten([for r in range(length(v) / 2) : [v[r], v[r + length(v) / 2]]])])
+  ]
+
+  shift = var.shift * length(local.cpus[0])
+
+  vm_arch = { for k in flatten([
+    for inx in range(var.vms) : {
+      inx : inx
+      cpus : slice(flatten(local.cpus), inx * var.cpus + local.shift, (inx + 1) * var.cpus + local.shift)
+      numa : { for numa in range(length(var.cpu_affinity)) : numa => setintersection(local.cpus[numa], slice(flatten(local.cpus), inx * var.cpus + local.shift, (inx + 1) * var.cpus + local.shift)) }
+    }
+  ]) : k.inx => k }
+}
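A worked example may help here (my own sketch under the default values, not part of the commit). Each `cpu_affinity` entry describes one host NUMA node whose hyper-thread siblings sit 64 IDs above their physical cores; the locals interleave each core with its sibling so that any contiguous slice lands on whole sibling pairs:

```hcl
# server_cpus[0] = [0, 1, ..., 15, 64, 65, ..., 79]   # NUMA node 0, ranges flattened
# cpus[0]        = [0, 64, 1, 65, 2, 66, ..., 15, 79] # core k paired with sibling k+64
#
# With vms = 2, cpus = 16 and shift = 0, each VM takes a 16-entry slice of the
# flattened list, i.e. 8 physical cores plus their siblings, all on NUMA node 0:
#
# vm_arch = {
#   0 = { inx = 0, cpus = [0, 64, 1, 65, ..., 7, 71],  numa = { 0 = [...], 1 = [], 2 = [], 3 = [] } }
#   1 = { inx = 1, cpus = [8, 72, 9, 73, ..., 15, 79], numa = { 0 = [...], 1 = [], 2 = [], 3 = [] } }
# }
```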
@@ -6,20 +6,11 @@ locals {
   dbs = { for k in flatten([
     for zone in local.zones : [
       for inx in range(lookup(try(var.instances[zone], {}), "db_count", 0)) : {
+        inx : inx
         id : lookup(try(var.instances[zone], {}), "db_id", 9000) + inx
         name : "${local.db_prefix}-${format("%02d", index(local.zones, zone))}${format("%x", 10 + inx)}"
         zone : zone
         cpu : lookup(try(var.instances[zone], {}), "db_cpu", 1)
-        cpus : lookup(try(var.instances[zone], {}), "db_affinity", "") != "" ? lookup(var.instances[zone], "db_affinity") : join(",", slice(
-          flatten(local.cpus[zone]),
-          2 * data.proxmox_virtual_environment_node.node[zone].cpu_count - (inx + 1) * lookup(try(var.instances[zone], {}), "db_cpu", 1),
-          2 * data.proxmox_virtual_environment_node.node[zone].cpu_count - inx * lookup(try(var.instances[zone], {}), "db_cpu", 1)
-        ))
-        numas : [0]
-        # range(
-        #   length(local.cpu_numa[zone]) - (inx + 1) * lookup(try(var.instances[zone], {}), "db_numas", 1),
-        #   length(local.cpu_numa[zone]) - inx * lookup(try(var.instances[zone], {}), "db_numas", 1)
-        # )
         mem : lookup(try(var.instances[zone], {}), "db_mem", 2048)

         hvv4 = cidrhost(local.subnets[zone], 0)
@@ -34,6 +25,19 @@ locals {
   ]) : k.name => k }
 }
+
+module "db_affinity" {
+  for_each = { for zone in local.zones : zone => {
+    zone : zone
+    vms : lookup(try(var.instances[zone], {}), "db_count", 0)
+  } if lookup(try(var.instances[zone], {}), "db_count", 0) > 0 }
+
+  source       = "./cpuaffinity"
+  cpu_affinity = var.nodes[each.value.zone].cpu
+  vms          = each.value.vms
+  cpus         = lookup(try(var.instances[each.value.zone], {}), "db_cpu", 1)
+  # shift = length(var.nodes[each.value.zone].cpu) - 1
+}

 resource "proxmox_virtual_environment_file" "db_machineconfig" {
   for_each  = local.dbs
   node_name = each.value.zone
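The module receives `var.nodes[zone].cpu` as its `cpu_affinity` list, so `var.nodes` presumably carries one NUMA range string per host node; the `cpu` block below then joins the module's per-VM `arch[...].cpus` list into the Proxmox affinity string. A hypothetical tfvars shape implied by this call (not shown anywhere in the diff):

```hcl
# Illustrative only; the real var.nodes definition is not part of this commit.
nodes = {
  "node1" = {
    cpu = ["0-15,64-79", "16-31,80-95", "32-47,96-111", "48-63,112-127"]
  }
}
```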
@@ -92,7 +96,7 @@ resource "proxmox_virtual_environment_vm" "db" {
   cpu {
     architecture = "x86_64"
     cores        = each.value.cpu
-    affinity     = each.value.cpus
+    affinity     = join(",", module.db_affinity[each.value.zone].arch[each.value.inx].cpus)
     sockets      = 1
     numa         = true
     type         = "host"
@@ -103,10 +107,10 @@ resource "proxmox_virtual_environment_vm" "db" {
     # keep_hugepages = true
   }
   dynamic "numa" {
-    for_each = { for idx, numa in each.value.numas : numa => {
-      device = "numa${idx}"
-      cpus   = "${idx * (each.value.cpu / length(each.value.numas))}-${(idx + 1) * (each.value.cpu / length(each.value.numas)) - 1}"
-      mem    = each.value.mem / length(each.value.numas)
+    for_each = { for idx, numa in module.db_affinity[each.value.zone].arch[each.value.inx].numa : idx => {
+      device = "numa${index(keys(module.db_affinity[each.value.zone].arch[each.value.inx].numa), idx)}"
+      cpus   = "${idx * (each.value.cpu / length(module.db_affinity[each.value.zone].arch[each.value.inx].numa))}-${(idx + 1) * (each.value.cpu / length(module.db_affinity[each.value.zone].arch[each.value.inx].numa)) - 1}"
+      mem    = each.value.mem / length(module.db_affinity[each.value.zone].arch[each.value.inx].numa)
     } }
     content {
       device = numa.value.device
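The same guest-NUMA arithmetic is applied to the web and worker VMs below. A worked example of this dynamic block (my own, assuming a VM with cpu = 8 and mem = 4096 whose affinity set spans host NUMA nodes 0 and 1, i.e. `arch[...].numa` has keys 0 and 1):

```hcl
# idx = 0: device = "numa0", cpus = "0-3", mem = 2048
# idx = 1: device = "numa1", cpus = "4-7", mem = 2048
#
# Guest vCPUs and memory are split evenly across the surviving host nodes, and
# index(keys(...), idx) renumbers the guest NUMA devices 0..n-1 even when the
# remaining host-node keys are sparse (empty nodes were filtered in outputs.tf).
```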
@@ -6,15 +6,11 @@ locals {
   webs = { for k in flatten([
     for zone in local.zones : [
      for inx in range(lookup(try(var.instances[zone], {}), "web_count", 0)) : {
+        inx : inx
         id : lookup(try(var.instances[zone], {}), "web_id", 9000) + inx
         name : "${local.web_prefix}-${format("%02d", index(local.zones, zone))}${format("%x", 10 + inx)}"
         zone : zone
         cpu : lookup(try(var.instances[zone], {}), "web_cpu", 1)
-        cpus : join(",", slice(
-          flatten(local.cpus[zone]),
-          inx * lookup(try(var.instances[zone], {}), "web_cpu", 1), (inx + 1) * lookup(try(var.instances[zone], {}), "web_cpu", 1)
-        ))
-        numas : [0] # [inx]
         mem : lookup(try(var.instances[zone], {}), "web_mem", 2048)

         hvv4 = cidrhost(local.subnets[zone], 0)
@@ -29,6 +25,18 @@ locals {
   ]) : k.name => k }
 }
+
+module "web_affinity" {
+  for_each = { for zone in local.zones : zone => {
+    zone : zone
+    vms : lookup(try(var.instances[zone], {}), "web_count", 0)
+  } if lookup(try(var.instances[zone], {}), "web_count", 0) > 0 }
+
+  source       = "./cpuaffinity"
+  cpu_affinity = var.nodes[each.value.zone].cpu
+  vms          = each.value.vms
+  cpus         = lookup(try(var.instances[each.value.zone], {}), "web_cpu", 1)
+}

 resource "proxmox_virtual_environment_file" "web_machineconfig" {
   for_each  = local.webs
   node_name = each.value.zone
@@ -109,7 +117,7 @@ resource "proxmox_virtual_environment_vm" "web" {
   cpu {
     architecture = "x86_64"
     cores        = each.value.cpu
-    affinity     = each.value.cpus
+    affinity     = join(",", module.web_affinity[each.value.zone].arch[each.value.inx].cpus)
     sockets      = 1
     numa         = true
     type         = "host"
@@ -120,10 +128,10 @@ resource "proxmox_virtual_environment_vm" "web" {
     # keep_hugepages = true
   }
   dynamic "numa" {
-    for_each = { for idx, numa in each.value.numas : numa => {
-      device = "numa${idx}"
-      cpus   = "0-${each.value.cpu - 1}"
-      mem    = each.value.mem
+    for_each = { for idx, numa in module.web_affinity[each.value.zone].arch[each.value.inx].numa : idx => {
+      device = "numa${index(keys(module.web_affinity[each.value.zone].arch[each.value.inx].numa), idx)}"
+      cpus   = "${idx * (each.value.cpu / length(module.web_affinity[each.value.zone].arch[each.value.inx].numa))}-${(idx + 1) * (each.value.cpu / length(module.web_affinity[each.value.zone].arch[each.value.inx].numa)) - 1}"
+      mem    = each.value.mem / length(module.web_affinity[each.value.zone].arch[each.value.inx].numa)
     } }
     content {
       device = numa.value.device
@@ -6,16 +6,11 @@ locals {
   workers = { for k in flatten([
     for zone in local.zones : [
      for inx in range(lookup(try(var.instances[zone], {}), "worker_count", 0)) : {
+        inx : inx
         id : lookup(try(var.instances[zone], {}), "worker_id", 9000) + inx
         name : "${local.worker_prefix}-${format("%02d", index(local.zones, zone))}${format("%x", 10 + inx)}"
         zone : zone
-        node_name : zone
         cpu : lookup(try(var.instances[zone], {}), "worker_cpu", 1)
-        cpus : join(",", slice(
-          flatten(local.cpus[zone]),
-          (inx + 2) * lookup(try(var.instances[zone], {}), "worker_cpu", 1), (inx + 3) * lookup(try(var.instances[zone], {}), "worker_cpu", 1)
-        ))
-        numas : [0] # [2 + inx]
         mem : lookup(try(var.instances[zone], {}), "worker_mem", 2048)

         hvv4 = cidrhost(local.subnets[zone], 0)
@@ -30,9 +25,21 @@ locals {
   ]) : k.name => k }
 }
+
+module "worker_affinity" {
+  for_each = { for zone in local.zones : zone => {
+    zone : zone
+    vms : lookup(try(var.instances[zone], {}), "worker_count", 0)
+  } if lookup(try(var.instances[zone], {}), "worker_count", 0) > 0 }
+
+  source       = "./cpuaffinity"
+  cpu_affinity = var.nodes[each.value.zone].cpu
+  vms          = each.value.vms
+  cpus         = lookup(try(var.instances[each.value.zone], {}), "worker_cpu", 1)
+}

 resource "proxmox_virtual_environment_file" "worker_machineconfig" {
   for_each     = local.workers
-  node_name    = each.value.node_name
+  node_name    = each.value.zone
   content_type = "snippets"
   datastore_id = "local"

@@ -88,7 +95,7 @@ resource "proxmox_virtual_environment_vm" "worker" {
   cpu {
     architecture = "x86_64"
     cores        = each.value.cpu
-    affinity     = each.value.cpus
+    affinity     = join(",", module.worker_affinity[each.value.zone].arch[each.value.inx].cpus)
     sockets      = 1
     numa         = true
     type         = "host"
@@ -99,10 +106,10 @@ resource "proxmox_virtual_environment_vm" "worker" {
     # keep_hugepages = true
  }
   dynamic "numa" {
-    for_each = { for idx, numa in each.value.numas : numa => {
-      device = "numa${idx}"
-      cpus   = "0-${each.value.cpu - 1}"
-      mem    = each.value.mem
+    for_each = { for idx, numa in module.worker_affinity[each.value.zone].arch[each.value.inx].numa : idx => {
+      device = "numa${index(keys(module.worker_affinity[each.value.zone].arch[each.value.inx].numa), idx)}"
+      cpus   = "${idx * (each.value.cpu / length(module.worker_affinity[each.value.zone].arch[each.value.inx].numa))}-${(idx + 1) * (each.value.cpu / length(module.worker_affinity[each.value.zone].arch[each.value.inx].numa)) - 1}"
+      mem    = each.value.mem / length(module.worker_affinity[each.value.zone].arch[each.value.inx].numa)
     } }
     content {
       device = numa.value.device