azure: Allow workers with NvmeDisk Ephemeral OS disks

* Several v6 SKU types come with ephemeral OS disks with Nvme so
you get faster local storage and avoid managed disk costs
* Ensure worker_disk_size is set to the appropriate size for the
SKU's ephemeral storage, since you pay for it either way
* Requires https://github.com/hashicorp/terraform-provider-azurerm/pull/30044
This commit is contained in:
Dalton Hubble
2025-07-01 09:26:13 -07:00
parent bd4147c844
commit bdaa1d02c2
9 changed files with 62 additions and 16 deletions

View File

@@ -4,6 +4,8 @@ Notable changes between versions.
## Latest
## v1.33.2
* Kubernetes [v1.33.2](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.33.md#v1332)
* Update CoreDNS from v1.12.0 to v1.12.1
* Update Cilium from v1.17.4 to [v1.17.5](https://github.com/cilium/cilium/releases/tag/v1.17.5)
@@ -16,6 +18,8 @@ Notable changes between versions.
* Change Azure VMSS instance update policy (i.e. upgrade policy) from Manual to Rolling
* Set a rolling upgrade policy so that changes to the worker node pool are rolled out gradually. Previously, the VMSS model could change, but instances would not receive it until manually replaced
* Define Azure automatic instance repair using Application Health Extension probes to 10256 (kube-proxy or Cilium equivalent) to match the strategy used on Google Cloud
* Add `worker_ephemeral_placement` variable to allow workers with `NvmeDisk` Ephemeral OS disks ([docs](https://learn.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks))
* Requires `azurerm` provider with NvmeDisk support ([#30044](https://github.com/hashicorp/terraform-provider-azurerm/pull/30044))
### Google Cloud

View File

@@ -81,6 +81,16 @@ variable "worker_ephemeral_disk" {
default = false
}
# Placement of the ephemeral OS disk. "ResourceDisk" uses the VM's temp/resource
# disk (classic behavior); "NvmeDisk" uses local NVMe storage on supporting SKUs
# (e.g. several v6 series). Only applied when worker_ephemeral_disk is true.
variable "worker_ephemeral_disk_placement" {
  type        = string
  description = "Ephemeral OS disk placement (ResourceDisk or NvmeDisk)"
  default     = "ResourceDisk"

  validation {
    condition     = contains(["ResourceDisk", "NvmeDisk"], var.worker_ephemeral_disk_placement)
    # Error message must name the actual variable so users know what to fix.
    error_message = "worker_ephemeral_disk_placement must be ResourceDisk or NvmeDisk."
  }
}
variable "worker_priority" {
type = string
description = "Set worker priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time."

View File

@@ -10,13 +10,14 @@ module "workers" {
backend_address_pool_ids = local.backend_address_pool_ids
# instances
os_image = var.os_image
worker_count = var.worker_count
vm_type = var.worker_type
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
ephemeral_disk = var.worker_ephemeral_disk
priority = var.worker_priority
os_image = var.os_image
worker_count = var.worker_count
vm_type = var.worker_type
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
ephemeral_disk = var.worker_ephemeral_disk
ephemeral_disk_placement = var.worker_ephemeral_disk_placement
priority = var.worker_priority
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet

View File

@@ -70,6 +70,16 @@ variable "ephemeral_disk" {
default = false
}
# Placement of the ephemeral OS disk. "ResourceDisk" uses the VM's temp/resource
# disk (classic behavior); "NvmeDisk" uses local NVMe storage on supporting SKUs.
# Only applied when ephemeral_disk is true.
variable "ephemeral_disk_placement" {
  type        = string
  description = "Ephemeral OS disk placement (ResourceDisk or NvmeDisk)"
  default     = "ResourceDisk"

  validation {
    condition     = contains(["ResourceDisk", "NvmeDisk"], var.ephemeral_disk_placement)
    # Error message must name the actual variable so users know what to fix.
    error_message = "ephemeral_disk_placement must be ResourceDisk or NvmeDisk."
  }
}
variable "priority" {
type = string
description = "Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."

View File

@@ -23,7 +23,7 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
for_each = var.ephemeral_disk ? [1] : []
content {
option = "Local"
placement = "ResourceDisk"
placement = var.ephemeral_disk_placement
}
}
}

View File

@@ -87,6 +87,16 @@ variable "worker_ephemeral_disk" {
default = false
}
# Placement of the ephemeral OS disk. "ResourceDisk" uses the VM's temp/resource
# disk (classic behavior); "NvmeDisk" uses local NVMe storage on supporting SKUs
# (e.g. several v6 series). Only applied when worker_ephemeral_disk is true.
variable "worker_ephemeral_disk_placement" {
  type        = string
  description = "Ephemeral OS disk placement (ResourceDisk or NvmeDisk)"
  default     = "ResourceDisk"

  validation {
    condition     = contains(["ResourceDisk", "NvmeDisk"], var.worker_ephemeral_disk_placement)
    # Error message must name the actual variable so users know what to fix.
    error_message = "worker_ephemeral_disk_placement must be ResourceDisk or NvmeDisk."
  }
}
variable "worker_priority" {
type = string
description = "Set worker priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be deallocated at any time."

View File

@@ -9,13 +9,14 @@ module "workers" {
security_group_id = azurerm_network_security_group.worker.id
backend_address_pool_ids = local.backend_address_pool_ids
worker_count = var.worker_count
vm_type = var.worker_type
os_image = var.os_image
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
ephemeral_disk = var.worker_ephemeral_disk
priority = var.worker_priority
worker_count = var.worker_count
vm_type = var.worker_type
os_image = var.os_image
disk_type = var.worker_disk_type
disk_size = var.worker_disk_size
ephemeral_disk = var.worker_ephemeral_disk
ephemeral_disk_placement = var.worker_ephemeral_disk_placement
priority = var.worker_priority
# configuration
kubeconfig = module.bootstrap.kubeconfig-kubelet

View File

@@ -76,6 +76,16 @@ variable "ephemeral_disk" {
default = false
}
# Placement of the ephemeral OS disk. "ResourceDisk" uses the VM's temp/resource
# disk (classic behavior); "NvmeDisk" uses local NVMe storage on supporting SKUs.
# Only applied when ephemeral_disk is true.
variable "ephemeral_disk_placement" {
  type        = string
  description = "Ephemeral OS disk placement (ResourceDisk or NvmeDisk)"
  default     = "ResourceDisk"

  validation {
    condition     = contains(["ResourceDisk", "NvmeDisk"], var.ephemeral_disk_placement)
    # Error message must name the actual variable so users know what to fix.
    error_message = "ephemeral_disk_placement must be ResourceDisk or NvmeDisk."
  }
}
variable "priority" {
type = string
description = "Set priority to Spot to use reduced cost surplus capacity, with the tradeoff that instances can be evicted at any time."

View File

@@ -27,7 +27,7 @@ resource "azurerm_orchestrated_virtual_machine_scale_set" "workers" {
for_each = var.ephemeral_disk ? [1] : []
content {
option = "Local"
placement = "ResourceDisk"
placement = var.ephemeral_disk_placement
}
}
}