Worker node + cosmetic

Serge Logvinov
2021-12-19 14:14:54 +02:00
parent 37cea512d3
commit 2a98f8c7b5
6 changed files with 170 additions and 94 deletions

View File

@@ -7,7 +7,6 @@ provider "oci" {
   tenancy_ocid     = var.tenancy_ocid
   user_ocid        = var.user_ocid
   fingerprint      = var.fingerprint
-  private_key_path = "~/.oci/oci_api_key.pem"
-  region           = var.region
+  private_key_path = var.key_file
+  region           = var.region
 }
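
Note: with private_key_path now driven by the new key_file variable (declared in the variables file later in this commit, default "~/.oci/oci_public.pem"), the API key location can be overridden per environment. A minimal sketch of such an override; the terraform.tfvars filename is only an illustration, and any *.tfvars file or a -var flag works the same way:

# terraform.tfvars (hypothetical override file)
key_file = "~/.oci/oci_api_key.pem"   # e.g. restore the previously hard-coded path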

View File

@@ -12,13 +12,6 @@ resource "oci_core_instance_pool" "web" {
     primary_subnet_id   = local.network_public[local.zone].id
   }
-  # load_balancers {
-  #   backend_set_name = oci_network_load_balancer_backend_set.web.name
-  #   load_balancer_id = oci_network_load_balancer_network_load_balancer.web.id
-  #   port             = 80
-  #   vnic_selection   = "PrimaryVnic"
-  # }
   lifecycle {
     ignore_changes = [
       # size,
@@ -49,13 +42,13 @@ resource "oci_core_instance_configuration" "web" {
       }
       metadata = {
-        user_data = templatefile("${path.module}/templates/web.yaml.tpl",
+        user_data = base64encode(templatefile("${path.module}/templates/web.yaml.tpl",
           merge(var.kubernetes, {
             lbv4        = local.lbv4_local
             lbv4_web    = local.lbv4_web
             nodeSubnets = local.network_public[local.zone].cidr_block
           })
-        )
+        ))
       }
       source_details {
@@ -67,7 +60,7 @@ resource "oci_core_instance_configuration" "web" {
        display_name              = "${var.project}-web"
        assign_private_dns_record = false
        assign_public_ip          = true
-        nsg_ids                   = [local.nsg_talos, local.nsg_cilium, local.nsg_web, local.nsg_contolplane]
+        nsg_ids                   = [local.nsg_talos, local.nsg_cilium, local.nsg_web]
        subnet_id                 = local.network_public[local.zone].id
        skip_source_dest_check    = true
      }
@@ -99,7 +92,7 @@ data "oci_core_instance_pool_instances" "web" {
 }
 resource "oci_network_load_balancer_backend" "web_http" {
-  for_each = { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id }
+  for_each = local.lbv4_web_enable ? { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id } : {}
   backend_set_name         = oci_network_load_balancer_backend_set.web_http[0].name
   network_load_balancer_id = oci_network_load_balancer_network_load_balancer.web[0].id
@@ -114,7 +107,7 @@ resource "oci_network_load_balancer_backend" "web_http" {
 }
 resource "oci_network_load_balancer_backend" "web_https" {
-  for_each = { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id }
+  for_each = local.lbv4_web_enable ? { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id } : {}
   backend_set_name         = oci_network_load_balancer_backend_set.web_https[0].name
   network_load_balancer_id = oci_network_load_balancer_network_load_balancer.web[0].id
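
For reference, the for_each change above uses the common Terraform idiom of switching between a populated map and an empty map, so an entire group of resources can be toggled by a single flag (here local.lbv4_web_enable, defined elsewhere in the repository and not shown in this commit). A minimal standalone sketch of the pattern; the flag name and instance map below are hypothetical:

locals {
  web_lb_enabled = false                                   # hypothetical stand-in for lbv4_web_enable
  web_instances  = { "web-1" = "instance-id-1", "web-2" = "instance-id-2" }
}

resource "null_resource" "web_backend" {
  # an empty map when the flag is off, so Terraform creates no instances of this resource
  for_each = local.web_lb_enabled ? local.web_instances : {}

  triggers = {
    instance_id = each.value
  }
}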

View File

@@ -1,75 +1,84 @@
-# resource "oci_core_instance_pool" "workers" {
-#   compartment_id            = var.compartment_ocid
-#   instance_configuration_id = oci_core_instance_configuration.workers.id
-#   size                      = 0
-#   state                     = "RUNNING"
-#   display_name              = "${var.project}-workers"
+resource "oci_core_instance_pool" "workers" {
+  compartment_id            = var.compartment_ocid
+  instance_configuration_id = oci_core_instance_configuration.workers.id
+  size                      = lookup(var.instances[local.zone], "worker_count", 0)
+  state                     = "RUNNING"
+  display_name              = "${var.project}-workers"
-#   placement_configurations {
-#     availability_domain = local.network_public["jNdv:eu-amsterdam-1-AD-1"].availability_domain
-#     fault_domains       = data.oci_identity_fault_domains.fault_domains.fault_domains.*.name
-#     primary_subnet_id   = local.network_public["jNdv:eu-amsterdam-1-AD-1"].id
-#   }
+  placement_configurations {
+    availability_domain = local.network_private[local.zone].availability_domain
+    fault_domains       = data.oci_identity_fault_domains.domains.fault_domains.*.name
+    primary_subnet_id   = local.network_private[local.zone].id
+  }
-#   lifecycle {
-#     ignore_changes = [
-#       size,
-#       state,
-#       defined_tags
-#     ]
-#   }
-# }
+  lifecycle {
+    ignore_changes = [
+      state,
+      defined_tags
+    ]
+  }
+}
-# resource "oci_core_instance_configuration" "workers" {
-#   compartment_id = var.compartment_ocid
-#   display_name   = "${var.project}-workers"
+resource "oci_core_instance_configuration" "workers" {
+  compartment_id = var.compartment_ocid
+  display_name   = "${var.project}-workers"
-#   instance_details {
-#     instance_type = "compute"
+  instance_details {
+    instance_type = "compute"
-#     launch_details {
-#       compartment_id                      = var.compartment_ocid
-#       shape                               = "VM.Standard.E2.1.Micro"
-#       display_name                        = "${var.project}-workers"
-#       is_pv_encryption_in_transit_enabled = true
-#       preferred_maintenance_action        = "LIVE_MIGRATE"
-#       launch_mode                         = "NATIVE"
+    launch_details {
+      compartment_id                      = var.compartment_ocid
+      display_name                        = "${var.project}-workers"
+      is_pv_encryption_in_transit_enabled = true
+      preferred_maintenance_action        = "LIVE_MIGRATE"
+      launch_mode                         = "NATIVE"
-#       metadata = {
-#         ssh_authorized_keys = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDd+wfWIKi1dDZuCsd/zNw2n4WuHHa21N/Ltmo3umH2d local"
-#       }
+      shape = lookup(var.instances[local.zone], "worker_instance_shape", "VM.Standard.E2.1.Micro")
+      shape_config {
+        ocpus         = lookup(var.instances[local.zone], "worker_instance_ocpus", 1)
+        memory_in_gbs = lookup(var.instances[local.zone], "worker_instance_memgb", 1)
+      }
-#       source_details {
-#         source_type             = "image"
-#         image_id                = data.oci_core_images.talos_x64.images[0].id
-#         boot_volume_size_in_gbs = "50"
-#       }
-#       create_vnic_details {
-#         display_name              = "${var.project}-workers"
-#         assign_private_dns_record = false
-#         assign_public_ip          = true
-#         nsg_ids                   = [local.nsg_talos, local.nsg_web]
-#         subnet_id                 = local.network_public["jNdv:eu-amsterdam-1-AD-1"].id
-#       }
+      metadata = {
+        user_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
+          merge(var.kubernetes, {
+            lbv4        = local.lbv4_local
+            nodeSubnets = local.network_private[local.zone].cidr_block
+          })
+        ))
+      }
-#       agent_config {
-#         is_management_disabled = false
-#         is_monitoring_disabled = false
-#       }
-#       launch_options {
-#         network_type = "PARAVIRTUALIZED"
-#       }
-#       instance_options {
-#         are_legacy_imds_endpoints_disabled = true
-#       }
-#       availability_config {
-#         recovery_action = "RESTORE_INSTANCE"
-#       }
-#     }
-#   }
+      source_details {
+        source_type             = "image"
+        image_id                = data.oci_core_images.talos_x64.images[0].id
+        boot_volume_size_in_gbs = "50"
+      }
+      create_vnic_details {
+        display_name              = "${var.project}-workers"
+        assign_private_dns_record = false
+        assign_public_ip          = false
+        nsg_ids                   = [local.nsg_talos, local.nsg_cilium]
+        subnet_id                 = local.network_private[local.zone].id
+      }
-#   lifecycle {
-#     create_before_destroy = "true"
-#   }
-# }
+      agent_config {
+        is_management_disabled = false
+        is_monitoring_disabled = false
+      }
+      launch_options {
+        network_type = "PARAVIRTUALIZED"
+      }
+      instance_options {
+        are_legacy_imds_endpoints_disabled = true
+      }
+      availability_config {
+        recovery_action = "RESTORE_INSTANCE"
+      }
+    }
+  }
+  lifecycle {
+    create_before_destroy = "true"
+  }
+}
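
The new worker pool and instance configuration size themselves from var.instances via lookup(map, key, default), so a zone that omits the worker_* keys falls back to safe defaults (0 workers, the free-tier VM.Standard.E2.1.Micro shape). A small sketch of how those lookups resolve; the zone key and values here are illustrative only:

locals {
  instances_example = {
    "zone-a" = { worker_count = 2 }                        # worker shape/ocpus/memgb deliberately omitted
  }

  worker_count_example = lookup(local.instances_example["zone-a"], "worker_count", 0)
  # => 2 (the key exists, so the default is ignored)

  worker_shape_example = lookup(local.instances_example["zone-a"], "worker_instance_shape", "VM.Standard.E2.1.Micro")
  # => "VM.Standard.E2.1.Micro" (the key is missing, so the default applies)
}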

View File

@@ -21,17 +21,37 @@ output "network_nat" {
 output "network_lb" {
   description = "The lb network"
-  value       = oci_core_subnet.regional_lb
+  value = {
+    id                    = oci_core_subnet.regional_lb.id
+    cidr_block            = oci_core_subnet.regional_lb.cidr_block
+    virtual_router_ip     = oci_core_subnet.regional_lb.virtual_router_ip
+    ipv6cidr_block        = oci_core_subnet.regional_lb.ipv6cidr_block
+    ipv6virtual_router_ip = oci_core_subnet.regional_lb.ipv6virtual_router_ip
+  }
 }
 output "network_public" {
   description = "The public network"
-  value       = oci_core_subnet.public
+  value = { for az, network in oci_core_subnet.public : az => {
+    id                    = network.id
+    cidr_block            = network.cidr_block
+    virtual_router_ip     = network.virtual_router_ip
+    ipv6cidr_block        = network.ipv6cidr_block
+    ipv6virtual_router_ip = network.ipv6virtual_router_ip
+    availability_domain   = network.availability_domain
+  } }
 }
 output "network_private" {
   description = "The private network"
-  value       = oci_core_subnet.private
+  value = { for az, network in oci_core_subnet.private : az => {
+    id                    = network.id
+    cidr_block            = network.cidr_block
+    virtual_router_ip     = network.virtual_router_ip
+    ipv6cidr_block        = network.ipv6cidr_block
+    ipv6virtual_router_ip = network.ipv6virtual_router_ip
+    availability_domain   = network.availability_domain
+  } }
 }
 output "nsg_cilium" {

View File

@@ -0,0 +1,53 @@
+version: v1alpha1
+debug: false
+persist: true
+machine:
+  type: worker
+  token: ${tokenMachine}
+  ca:
+    crt: ${caMachine}
+  certSANs: []
+  kubelet:
+    extraArgs:
+      cloud-provider: external
+      rotate-server-certificates: true
+    clusterDNS:
+      - 169.254.2.53
+      - 10.200.16.10
+    nodeIP:
+      validSubnets: ${format("%#v",split(",",nodeSubnets))}
+  network:
+    interfaces:
+      - interface: dummy0
+        addresses:
+          - 169.254.2.53/32
+          - fd00::169:254:2:53/128
+  install:
+    wipe: true
+  sysctls:
+    net.core.somaxconn: 65535
+    net.core.netdev_max_backlog: 4096
+    net.ipv4.tcp_keepalive_time: 600
+    net.ipv4.tcp_keepalive_intvl: 60
+    fs.inotify.max_user_instances: 256
+  systemDiskEncryption:
+    state:
+      provider: luks2
+      keys:
+        - nodeID: {}
+          slot: 0
+  time:
+    servers:
+      - 169.254.169.254
+cluster:
+  controlPlane:
+    endpoint: https://${lbv4}:6443
+  clusterName: ${clusterName}
+  network:
+    dnsDomain: ${domain}
+    serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
+  proxy:
+    disabled: true
+  token: ${token}
+  ca:
+    crt: ${ca}
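
A note on the validSubnets/serviceSubnets interpolations in this template: format("%#v", ...) JSON-encodes its argument, so a comma-separated string passed into templatefile renders as a flow-style YAML list. A quick illustration; the CIDR values are examples only:

output "valid_subnets_rendered" {
  # splits "172.16.0.0/24,fd00::/56" into a list and JSON-encodes it,
  # producing the string ["172.16.0.0/24","fd00::/56"]
  value = format("%#v", split(",", "172.16.0.0/24,fd00::/56"))
}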

View File

@@ -3,6 +3,9 @@ variable "compartment_ocid" {}
 variable "tenancy_ocid" {}
 variable "user_ocid" {}
 variable "fingerprint" {}
+variable "key_file" {
+  default = "~/.oci/oci_public.pem"
+}
 variable "project" {
   type = string
@@ -26,16 +29,15 @@ locals {
   project = data.terraform_remote_state.prepare.outputs.project
   zone    = data.terraform_remote_state.prepare.outputs.zones[0]
-  nsg_contolplane_lb = data.terraform_remote_state.prepare.outputs.nsg_contolplane_lb
-  network_lb         = data.terraform_remote_state.prepare.outputs.network_lb
-  nsg_cilium         = data.terraform_remote_state.prepare.outputs.nsg_cilium
-  nsg_talos          = data.terraform_remote_state.prepare.outputs.nsg_talos
-  nsg_contolplane    = data.terraform_remote_state.prepare.outputs.nsg_contolplane
-  nsg_web            = data.terraform_remote_state.prepare.outputs.nsg_web
+  network_lb         = data.terraform_remote_state.prepare.outputs.network_lb
+  network_public     = data.terraform_remote_state.prepare.outputs.network_public
+  network_private    = data.terraform_remote_state.prepare.outputs.network_private
+  nsg_contolplane_lb = data.terraform_remote_state.prepare.outputs.nsg_contolplane_lb
+  nsg_contolplane    = data.terraform_remote_state.prepare.outputs.nsg_contolplane
+  nsg_web            = data.terraform_remote_state.prepare.outputs.nsg_web
+  nsg_cilium         = data.terraform_remote_state.prepare.outputs.nsg_cilium
+  nsg_talos          = data.terraform_remote_state.prepare.outputs.nsg_talos
 }
 variable "kubernetes" {
@@ -52,7 +54,7 @@ variable "kubernetes" {
     token        = ""
     ca           = ""
   }
-  # sensitive = true
+  sensitive = true
 }
 variable "controlplane" {
@@ -72,9 +74,9 @@ variable "instances" {
   default = {
     "jNdv:eu-amsterdam-1-AD-1" = {
       web_count              = 0,
-      web_instance_shape     = "VM.Standard.E4.Flex",
+      web_instance_shape     = "VM.Standard.E2.1.Micro",
       web_instance_ocpus     = 1,
-      web_instance_memgb     = 3,
+      web_instance_memgb     = 1,
       worker_count           = 0,
       worker_instance_shape  = "VM.Standard.E2.1.Micro",
       worker_instance_ocpus  = 1,