Ensure multiple worker nodes are created (#17)

* ensure multiple worker nodes are created

* fixed issues with creating a cluster that has multiple nodes
Armen Hakobian
2024-06-04 19:23:15 +04:00
committed by GitHub
parent e1e162cb19
commit c586832e1c
6 changed files with 39 additions and 45 deletions

View File

@@ -1,10 +1,10 @@
-interanl:
+internal:
   controlplane:
     cpu_cores: 2
     memory: 4096
     disk_size: 10
-  worker_node:
-    cpu_cores: 4
-    memory: 4096
-    cpu_cores: 8
-    disc_size: 200
+  worker_nodes:
+    count: 1
+    cpu_cores: 2
+    memory: 2048
+    disc_size: 10
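
For reference, a minimal sketch of how the renamed worker_nodes block is consumed on the Terraform side (assuming the example config.yaml above, where the cluster key is "internal"); only count needs to change to request more worker VMs:

# Illustrative only: reading config.yaml the same way locals.tf does.
locals {
  config  = yamldecode(file("../../config.yaml"))
  cluster = local.config["internal"] # "internal" is the cluster name in the example above
}

output "worker_count" {
  value = local.cluster.worker_nodes.count # 1 in the example; raise it to provision more workers
}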

View File

@@ -2,7 +2,7 @@ module "argocd" {
   source = "../../modules/argocd"
-  git_repository_ssh_url = data.terraform_remote_state.infrastructure.outputs.github_repo_url[var.cluster_name].http_clone_url
+  git_repository_ssh_url = data.terraform_remote_state.infrastructure.outputs.github_repo_url.http_clone_url
   registry = var.image_registry
   argocd_chart_name = var.argocd_chart_name
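
The dropped [var.cluster_name] index implies the infrastructure state now exposes github_repo_url as a single repository object rather than a map keyed by cluster name. A sketch of what such an output could look like on the producing side (assumed from the diff, not shown in this commit):

# Hypothetical shape of the consumed output, matching the un-indexed reference above.
output "github_repo_url" {
  value = github_repository.argocd_applications # single object, so .http_clone_url is read directly
}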

View File

@@ -1,26 +1,24 @@
 resource "github_repository" "argocd_applications" {
   depends_on = [proxmox_vm_qemu.controlplane, proxmox_vm_qemu.worker]
-  for_each = local.clusters
-  name = var.cluster_name
-  description = "This repo is for the ArgoCD Applications."
+  name = var.cluster_name
+  description = "This repo is for the ArgoCD Applications."
   vulnerability_alerts = true

   template {
     owner = "infraheads"
     repository = "turnk8s_template_repo"
-    include_all_branches = true
+    include_all_branches = false
   }
 }

 resource "github_repository_file" "argocd_application" {
   for_each = local.clusters
-  repository = github_repository.argocd_applications[each.key].name
+  repository = github_repository.argocd_applications.name
   branch = "main"
   file = "argocd_applications/infraheads.yaml"
   content = templatefile("${path.module}/templates/argocd_application.yaml.tpl",
     {
-      sourceRepoURL = github_repository.argocd_applications[each.key].http_clone_url
+      sourceRepoURL = github_repository.argocd_applications.http_clone_url
     }
   )
 }

View File

@@ -1,7 +1,18 @@
 locals {
-  proxmox_api_url = "https://${var.proxmox_ip}:8006/api2/json"
-  proxmox_target_node = var.proxmox_ip == "192.168.1.5" ? "pve01" : "pve02"
+  proxmox_api_url = "https://${var.proxmox_ip}:8006/api2/json"
+  proxmox_target_node = var.proxmox_ip == "192.168.1.5" ? "pve01" : "pve02"
-  clusters = try({ tostring(var.cluster_name) = yamldecode(file("../../config.yaml"))[var.cluster_name] }, {})
-  talos_iso = "local:iso/metal-amd64-qemu-${var.talos_version}.iso"
+  clusters = try({ tostring(var.cluster_name) = yamldecode(file("../../config.yaml"))[var.cluster_name] }, {})
+  talos_iso = "local:iso/metal-amd64-qemu-${var.talos_version}.iso"
+
+  worker = flatten([
+    for cluster_key, cluster in local.clusters : [
+      for i in range(cluster.worker_nodes.count):
+      {
+        cpu_cores = cluster.worker_nodes.cpu_cores
+        disc_size = cluster.worker_nodes.disc_size
+        memory = cluster.worker_nodes.memory
+      }
+    ]
+  ])
 }
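
A minimal standalone sketch of what the new worker local flattens to (hypothetical values matching the example config.yaml, with count raised to 2):

locals {
  clusters = {
    internal = { # hypothetical cluster entry, as yamldecode would return it
      worker_nodes = { count = 2, cpu_cores = 2, memory = 2048, disc_size = 10 }
    }
  }

  worker = flatten([
    for cluster_key, cluster in local.clusters : [
      for i in range(cluster.worker_nodes.count) : {
        cpu_cores = cluster.worker_nodes.cpu_cores
        disc_size = cluster.worker_nodes.disc_size
        memory    = cluster.worker_nodes.memory
      }
    ]
  ])
}

# local.worker is a list with one entry per worker VM:
#   [{ cpu_cores = 2, disc_size = 10, memory = 2048 }, { cpu_cores = 2, disc_size = 10, memory = 2048 }]
# and { for idx, worker in local.worker : idx => worker } turns it into the map
# { "0" = {...}, "1" = {...} } that the worker resources below iterate over.
output "worker" {
  value = local.worker
}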

View File

@@ -7,7 +7,7 @@ resource "talos_machine_secrets" "talos_secrets" {
 data "talos_client_configuration" "cp_cc" {
   for_each = local.clusters
-  cluster_name = var.cluster_name
+  cluster_name = each.key
   client_configuration = talos_machine_secrets.talos_secrets.client_configuration
   nodes = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]
   endpoints = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]

@@ -36,8 +36,7 @@ data "talos_machine_configuration" "cp_mc" {
 # Applies machine configuration to the control plane
 resource "talos_machine_configuration_apply" "cp_mca" {
-  # depends_on = [data.talos_machine_configuration.cp_mc]
-  for_each = local.clusters
+  for_each = local.clusters
   client_configuration = talos_machine_secrets.talos_secrets.client_configuration
   machine_configuration_input = data.talos_machine_configuration.cp_mc[each.key].machine_configuration

@@ -47,7 +46,7 @@ resource "talos_machine_configuration_apply" "cp_mca" {
 # Bootstraps the etcd cluster on the control plane
 resource "talos_machine_bootstrap" "cp_mb" {
   depends_on = [talos_machine_configuration_apply.cp_mca]
-  for_each = local.clusters
+  for_each = local.clusters
   node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address
   client_configuration = talos_machine_secrets.talos_secrets.client_configuration

@@ -56,7 +55,7 @@ resource "talos_machine_bootstrap" "cp_mb" {
 # Retrieves the kubeconfig for a Talos cluster
 data "talos_cluster_kubeconfig" "cp_ck" {
   depends_on = [talos_machine_bootstrap.cp_mb]
-  for_each = local.clusters
+  for_each = local.clusters
   client_configuration = talos_machine_secrets.talos_secrets.client_configuration
   node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address

@@ -85,23 +84,9 @@ data "talos_machine_configuration" "worker_mc" {
 # Applies machine configuration to the worker node
 resource "talos_machine_configuration_apply" "worker_mca" {
-  # count = local.input_vars.worker_node.count
-  for_each = local.clusters
+  for_each = { for idx, worker in local.worker : idx => worker }
   client_configuration = talos_machine_secrets.talos_secrets.client_configuration
-  machine_configuration_input = data.talos_machine_configuration.worker_mc[each.key].machine_configuration
+  machine_configuration_input = data.talos_machine_configuration.worker_mc[var.cluster_name].machine_configuration
   node = proxmox_vm_qemu.worker[each.key].default_ipv4_address
 }
-
-data "talos_cluster_health" "cluster_health" {
-  depends_on = [data.talos_cluster_kubeconfig.cp_ck]
-  for_each = local.clusters
-  client_configuration = talos_machine_secrets.talos_secrets.client_configuration
-  control_plane_nodes = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]
-  worker_nodes = [proxmox_vm_qemu.worker[each.key].default_ipv4_address]
-  endpoints = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]
-
-  timeouts = {
-    read = "1h"
-  }
-}
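
Worth noting: worker_mca now iterates over the per-worker map while still reading the single machine configuration generated per cluster (worker_mc[var.cluster_name]), so every worker node receives the same Talos config. A sketch of the instances this yields with a hypothetical cluster named "internal" and worker_nodes.count = 2:

# Illustrative resource instances only, not actual plan output:
#   talos_machine_configuration_apply.worker_mca["0"] # applies worker_mc["internal"] to proxmox_vm_qemu.worker["0"]
#   talos_machine_configuration_apply.worker_mca["1"] # applies worker_mc["internal"] to proxmox_vm_qemu.worker["1"]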

View File

@@ -1,17 +1,17 @@
 resource "proxmox_vm_qemu" "worker" {
-  for_each = local.clusters
+  for_each = { for idx, worker in local.worker : idx => worker }
-  name = "${var.cluster_name}-worker-index"
+  name = "${var.cluster_name}-worker-${each.key}"
   target_node = local.proxmox_target_node
   iso = local.talos_iso
-  cores = each.value.worker_node.cpu_cores
+  cores = each.value.cpu_cores
   sockets = var.worker_sockets
   cpu = var.worker_cpu
   qemu_os = var.worker_qemu_os
   scsihw = var.worker_scsihw
-  memory = each.value.worker_node.memory
+  memory = each.value.memory
   agent = 1

   disks {

@@ -19,7 +19,7 @@ resource "proxmox_vm_qemu" "worker" {
     scsi0 {
       disk {
         storage = var.worker_disk_storage
-        size = each.value.worker_node.disc_size
+        size = each.value.disc_size
         iothread = true
         asyncio = "native"
       }
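
With the map-based for_each, worker VM names are derived from the map key instead of a literal suffix; a minimal sketch of the resulting names (hypothetical cluster name, two workers):

# Illustrative only: the same name expression evaluated for two worker keys.
variable "cluster_name" {
  default = "internal" # hypothetical
}

locals {
  worker_names = [for key in ["0", "1"] : "${var.cluster_name}-worker-${key}"]
}

output "worker_names" {
  value = local.worker_names # ["internal-worker-0", "internal-worker-1"]
}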