Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-10-30)
Gallery fixes
@@ -4,3 +4,10 @@
 #   name                = "talos-amd64-${each.key}"
 #   resource_group_name = local.resource_group
 # }
+
+data "azurerm_shared_image_version" "talos" {
+  name                = "latest"
+  image_name          = "talos"
+  gallery_name        = "293f5f4eea925204"
+  resource_group_name = local.resource_group
+}
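Note: with name = "latest" the data source resolves the newest image version in
the gallery at plan time, so each new gallery version is picked up automatically.
If a build needs to be reproducible, the lookup can be pinned to an explicit
version instead; a minimal sketch, reusing the gallery and resource group above
(the resource name talos_pinned is only illustrative):

    data "azurerm_shared_image_version" "talos_pinned" {
      name                = "0.0.3"
      image_name          = "talos"
      gallery_name        = "293f5f4eea925204"
      resource_group_name = local.resource_group
    }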
@@ -68,6 +68,9 @@ resource "azurerm_storage_blob" "talos" {
   storage_container_name = azurerm_storage_container.images.name
   type                   = "Page"
   source                 = "${path.module}/disk.vhd"
+  metadata = {
+    md5 = filemd5("${path.module}/disk.vhd")
+  }
 }

 resource "azurerm_image" "talos" {
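Note: Terraform does not track content changes of a blob's source file by
itself, so a rebuilt disk.vhd uploaded under the same name would otherwise go
unnoticed. Hashing the file into metadata makes the resource diff whenever the
VHD content changes. The same hash could be kept in a local for reuse elsewhere;
a trivial sketch (the local name is hypothetical):

    locals {
      talos_vhd_md5 = filemd5("${path.module}/disk.vhd")
    }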
@@ -88,16 +91,20 @@ resource "azurerm_image" "talos" {
 }

 resource "azurerm_shared_image_version" "talos" {
-  name                = "0.0.2"
+  name                = "0.0.3"
   location            = var.regions[0]
   resource_group_name = data.azurerm_resource_group.kubernetes.name
   gallery_name        = azurerm_shared_image.talos.gallery_name
   image_name          = azurerm_shared_image.talos.name
   managed_image_id    = azurerm_image.talos.id

-  target_region {
-    name                   = var.regions[0]
-    regional_replica_count = 1
-    storage_account_type   = "Standard_LRS"
+  dynamic "target_region" {
+    for_each = var.regions
+
+    content {
+      name                   = target_region.value
+      regional_replica_count = 1
+      storage_account_type   = "Standard_LRS"
+    }
   }
 }
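Note: replacing the single target_region block with a dynamic one replicates
version 0.0.3 to every entry in var.regions rather than only var.regions[0].
With illustrative values var.regions = ["westeurope", "northeurope"], the
dynamic block expands to the equivalent of:

    target_region {
      name                   = "westeurope"
      regional_replica_count = 1
      storage_account_type   = "Standard_LRS"
    }
    target_region {
      name                   = "northeurope"
      regional_replica_count = 1
      storage_account_type   = "Standard_LRS"
    }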
@@ -21,9 +21,9 @@ module "controlplane" {
   instance_count          = lookup(try(var.controlplane[each.key], {}), "count", 0)
   instance_resource_group = local.resource_group
   instance_type           = lookup(try(var.controlplane[each.key], {}), "instance_type", "Standard_B2s")
-  # instance_image          = data.azurerm_image.talos[each.key].id
-  instance_tags           = merge(var.tags, { type = "infra" })
-  instance_secgroup       = local.network_secgroup[each.key].controlplane
+  instance_image          = data.azurerm_shared_image_version.talos.id
+  instance_tags           = merge(var.tags, { type = "infra" })
+  instance_secgroup       = local.network_secgroup[each.key].controlplane
   instance_params = merge(var.kubernetes, {
     lbv4 = local.network_public[each.key].controlplane_lb[0]
     lbv6 = try(local.network_public[each.key].controlplane_lb[1], "")
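Note: instance_image now passes the single gallery image version ID instead of
the commented-out per-region managed image; this only works across regions
because the version is replicated to all of them (see the target_region change
above). Inside the module the value is presumably a plain string; a
hypothetical variable declaration it would satisfy:

    variable "instance_image" {
      description = "ID of the managed image or shared gallery image version"
      type        = string
    }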
@@ -55,13 +55,13 @@ resource "azurerm_linux_virtual_machine_scale_set" "web" {
     disk_size_gb         = 50
   }

-  # source_image_id = data.azurerm_image.talos[each.key].id
-  source_image_reference {
-    publisher = "talos"
-    offer     = "Talos"
-    sku       = "1.0-dev"
-    version   = "latest"
-  }
+  source_image_id = data.azurerm_shared_image_version.talos.id
+  # source_image_reference {
+  #   publisher = "talos"
+  #   offer     = "Talos"
+  #   sku       = "1.0-dev"
+  #   version   = "latest"
+  # }

   tags = merge(var.tags, { type = "web" })

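Note: source_image_id and source_image_reference are mutually exclusive on
azurerm_linux_virtual_machine_scale_set, hence the marketplace-style reference
is commented out rather than left beside the gallery ID. If both paths had to
coexist, a hypothetical toggle (var.talos_image_id is not in this repo) could
select one at plan time:

    source_image_id = var.talos_image_id != "" ? var.talos_image_id : null

    dynamic "source_image_reference" {
      for_each = var.talos_image_id == "" ? [1] : []
      content {
        publisher = "talos"
        offer     = "Talos"
        sku       = "1.0-dev"
        version   = "latest"
      }
    }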
@@ -1,84 +1,83 @@
-
-# locals {
-#   worker_labels = "topology.kubernetes.io/zone=azure,project.io/node-pool=worker"
-# }
+locals {
+  worker_labels = "topology.kubernetes.io/zone=azure,project.io/node-pool=worker"
+}

-# resource "azurerm_linux_virtual_machine_scale_set" "worker" {
-#   for_each = { for idx, name in local.regions : name => idx }
-#   location = each.key
+resource "azurerm_linux_virtual_machine_scale_set" "worker" {
+  for_each = { for idx, name in local.regions : name => idx }
+  location = each.key

-#   instances              = lookup(try(var.instances[each.key], {}), "worker_count", 0)
-#   name                   = "worker-${lower(each.key)}"
-#   computer_name_prefix   = "worker-${lower(each.key)}-"
-#   resource_group_name    = local.resource_group
-#   sku                    = lookup(try(var.instances[each.key], {}), "worker_instance_type", "Standard_B2s")
-#   extensions_time_budget = "PT30M"
-#   provision_vm_agent     = false
-#   # availability_set_id  = var.instance_availability_set
+  instances            = lookup(try(var.instances[each.key], {}), "worker_count", 0)
+  name                 = "worker-${lower(each.key)}"
+  computer_name_prefix = "worker-${lower(each.key)}-"
+  resource_group_name  = local.resource_group
+  sku                  = lookup(try(var.instances[each.key], {}), "worker_instance_type", "Standard_B2s")
+  provision_vm_agent   = false
+  overprovision        = false

-#   network_interface {
-#     name    = "worker-${lower(each.key)}"
-#     primary = true
-#     ip_configuration {
-#       name      = "worker-${lower(each.key)}-v4"
-#       primary   = true
-#       version   = "IPv4"
-#       subnet_id = local.network_private[each.key].network_id
-#     }
-#     ip_configuration {
-#       name      = "worker-${lower(each.key)}-v6"
-#       version   = "IPv6"
-#       subnet_id = local.network_private[each.key].network_id
-#     }
-#   }
+  # availability_set_id = var.instance_availability_set

-#   custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
-#     merge(var.kubernetes, {
-#       lbv4        = local.network_public[each.key].controlplane_lb[0]
-#       labels      = "topology.kubernetes.io/region=${each.key},${local.worker_labels}"
-#       nodeSubnets = [local.network_private[each.key].cidr[0]]
-#     })
-#   ))
+  network_interface {
+    name    = "worker-${lower(each.key)}"
+    primary = true
+    ip_configuration {
+      name      = "worker-${lower(each.key)}-v4"
+      primary   = true
+      version   = "IPv4"
+      subnet_id = local.network_private[each.key].network_id
+    }
+    ip_configuration {
+      name      = "worker-${lower(each.key)}-v6"
+      version   = "IPv6"
+      subnet_id = local.network_private[each.key].network_id
+    }
+  }

-#   os_disk {
-#     caching              = "ReadOnly"
-#     storage_account_type = "StandardSSD_LRS"
-#     disk_size_gb         = 50
+  custom_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
+    merge(var.kubernetes, {
+      lbv4        = local.network_public[each.key].controlplane_lb[0]
+      labels      = "topology.kubernetes.io/region=${each.key},${local.worker_labels}"
+      nodeSubnets = [local.network_private[each.key].cidr[0]]
+    })
+  ))

-#     dynamic "diff_disk_settings" {
-#       for_each = var.vm_os_ephemeral ? ["Local"] : []
-#       content {
-#         option    = diff_disk_settings.value
-#         placement = "ResourceDisk"
-#       }
-#     }
-#   }
+  admin_username = "talos"
+  admin_ssh_key {
+    username   = "talos"
+    public_key = file("~/.ssh/terraform.pub")
+  }

-#   disable_password_authentication = false
-#   admin_password = "talos4PWD"
-#   admin_username = "talos"
-#   admin_ssh_key {
-#     username   = "talos"
-#     public_key = file("~/.ssh/terraform.pub")
-#   }
+  os_disk {
+    caching              = "ReadOnly"
+    storage_account_type = "StandardSSD_LRS"
+    disk_size_gb         = 50

-#   source_image_id = data.azurerm_image.talos[each.key].id
-#   # source_image_reference {
-#   #   publisher = "Debian"
-#   #   offer     = "debian-11"
-#   #   sku       = "11-gen2"
-#   #   version   = "latest"
-#   # }
+    # dynamic "diff_disk_settings" {
+    #   for_each = var.vm_os_ephemeral ? ["Local"] : []
+    #   content {
+    #     option    = diff_disk_settings.value
+    #     placement = "ResourceDisk"
+    #   }
+    # }
+  }

-#   tags = merge(var.tags, { type = "worker" })
+  source_image_id = data.azurerm_shared_image_version.talos.id
+  # source_image_reference {
+  #   publisher = "talos"
+  #   offer     = "Talos"
+  #   sku       = "1.0-dev"
+  #   version   = "latest"
+  # }

-#   automatic_instance_repair {
-#     ~ enabled      = true
-#     ~ grace_period = "PT30M"
-#   }
+  tags = merge(var.tags, { type = "worker" })

-#   boot_diagnostics {}
-#   lifecycle {
-#     ignore_changes = [admin_username, admin_ssh_key, os_disk, source_image_id, tags]
-#   }
-# }
+  # automatic_instance_repair {
+  #   enabled      = true
+  #   grace_period = "PT30M"
+  # }

+  boot_diagnostics {}
+  lifecycle {
+    ignore_changes = [admin_username, admin_ssh_key, os_disk, source_image_id, tags]
+  }
+}
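Note: this file is the previously commented-out worker scale set, now enabled.
The admin_ssh_key block satisfies the provider's required Linux credential;
Talos itself runs no SSH daemon, which is presumably why lifecycle.ignore_changes
lists it. custom_data renders templates/worker.yaml.tpl with the merged
kubernetes settings, so the template sees lbv4, labels and nodeSubnets as
variables. The rendered payload can be inspected without creating the scale
set; a sketch with a hypothetical output and an illustrative region key:

    output "worker_user_data_preview" {
      value = templatefile("${path.module}/templates/worker.yaml.tpl",
        merge(var.kubernetes, {
          lbv4        = local.network_public["westeurope"].controlplane_lb[0]
          labels      = "topology.kubernetes.io/region=westeurope,${local.worker_labels}"
          nodeSubnets = [local.network_private["westeurope"].cidr[0]]
        })
      )
      sensitive = true
    }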
@@ -7,6 +7,6 @@ output "controlplane_endpoints" {

 output "controlplane_bootstrap" {
   description = "Kubernetes controlplane bootstrap command"
-  value       = var.instance_count > 0 ? "talosctl apply-config --insecure --nodes ${try([for ip in azurerm_public_ip.controlplane_v4 : ip.ip_address if ip.ip_address != ""], [])[0]} --file _cfgs/controlplane-${lower(var.region)}-1.yaml" : ""
+  value       = var.instance_count > 0 ? "talosctl apply-config --insecure --nodes ${try([for ip in azurerm_public_ip.controlplane_v4 : ip.ip_address], [""])[0]} --file _cfgs/controlplane-${lower(var.region)}-1.yaml" : ""
   depends_on  = [azurerm_linux_virtual_machine.controlplane]
 }
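Note: in the old expression the [0] index sat outside the try(), so before the
public IPs were allocated the filter dropped every empty address and indexing
the empty list failed. Removing the filter and falling back to [""] appears
intended to render the command with a blank address during planning instead of
erroring. An equivalent, arguably clearer form (assuming controlplane_v4 is a
count-based resource):

    value = var.instance_count > 0 ? format(
      "talosctl apply-config --insecure --nodes %s --file _cfgs/controlplane-%s-1.yaml",
      try(azurerm_public_ip.controlplane_v4[0].ip_address, ""),
      lower(var.region)
    ) : ""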