Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git (synced 2025-11-01 18:58:39 +00:00)
Local dns + lb-l7
oracle/deployments/coredns-local.yaml (new file, 151 lines)
@@ -0,0 +1,151 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns-local
  namespace: kube-system
data:
  empty.db: |
    @ 60 IN SOA localnet. root.localnet. (
            1      ; serial
            60     ; refresh
            60     ; retry
            60     ; expiry
            60 )   ; minimum
    ;
    @ IN NS localnet.

  hosts: |
    # static hosts
    169.254.2.53 dns.local
    fd00::169:254:2:53 dns.local

  Corefile.local: |
    (empty) {
        file /etc/coredns/empty.db
    }

    .:53 {
        errors
        bind 169.254.2.53 fd00::169:254:2:53

        health 127.0.0.1:8091 {
            lameduck 5s
        }

        hosts /etc/coredns/hosts {
            reload 60s
            fallthrough
        }

        kubernetes cluster.local in-addr.arpa ip6.arpa {
            endpoint https://api.cluster.local:6443
            kubeconfig /etc/coredns/kubeconfig.conf coredns
            pods insecure
            ttl 60
        }
        prometheus :9153

        forward . /etc/resolv.conf {
            policy sequential
            expire 30s
        }

        cache 300
        loop
        reload
        loadbalance
    }
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: https://api.cluster.local:6443
      name: default
    contexts:
    - context:
        cluster: default
        namespace: kube-system
        user: coredns
      name: coredns
    current-context: coredns
    users:
    - name: coredns
      user:
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: coredns-local
  namespace: kube-system
  labels:
    k8s-app: kube-dns-local
    kubernetes.io/name: CoreDNS
spec:
  updateStrategy:
    type: RollingUpdate
  minReadySeconds: 15
  selector:
    matchLabels:
      k8s-app: kube-dns-local
      kubernetes.io/name: CoreDNS
  template:
    metadata:
      labels:
        k8s-app: kube-dns-local
        kubernetes.io/name: CoreDNS
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9153"
    spec:
      priorityClassName: system-node-critical
      serviceAccount: coredns
      serviceAccountName: coredns
      enableServiceLinks: false
      tolerations:
        - key: node.cloudprovider.kubernetes.io/uninitialized
          effect: NoSchedule
          value: "true"
      hostNetwork: true
      containers:
        - name: coredns
          image: coredns/coredns:1.8.6
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 50m
              memory: 64Mi
          args: [ "-conf", "/etc/coredns/Corefile.local" ]
          volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
              readOnly: true
          livenessProbe:
            httpGet:
              host: 127.0.0.1
              path: /health
              port: 8091
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns-local
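The DaemonSet runs on the host network and binds the link-local addresses 169.254.2.53 and fd00::169:254:2:53, so every node exposes the same resolver locally. A minimal sketch of a pod that points at it explicitly (hypothetical pod, for illustration only; the kubelet-level wiring happens via clusterDNS in the worker template further down):

---
apiVersion: v1
kind: Pod
metadata:
  name: dns-check            # hypothetical test pod
  namespace: default
spec:
  dnsPolicy: None            # skip the cluster default resolver
  dnsConfig:
    nameservers:
      - 169.254.2.53         # node-local CoreDNS bound by the DaemonSet
    searches:
      - default.svc.cluster.local
      - svc.cluster.local
      - cluster.local
  containers:
    - name: check
      image: busybox:1.36
      command: ["sleep", "3600"]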
@@ -57,7 +57,6 @@ resource "oci_core_instance" "contolplane" {
    are_all_plugins_disabled = true
    is_management_disabled   = true
    is_monitoring_disabled   = true
-
  }
  availability_config {
    is_live_migration_preferred = true
@@ -12,6 +12,13 @@ resource "oci_core_instance_pool" "web" {
    primary_subnet_id   = local.network_public[local.zone].id
  }

+  load_balancers {
+    backend_set_name = oci_load_balancer_backend_set.web.name
+    load_balancer_id = oci_load_balancer.web.id
+    port             = 80
+    vnic_selection   = "primaryvnic"
+  }
+
  lifecycle {
    ignore_changes = [
      state,
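Attaching the pool to the backend set this way lets OCI register and deregister pool instances in the L7 load balancer automatically as the pool scales. A sketch of how a second attachment could look if an HTTPS backend set existed (hypothetical oci_load_balancer_backend_set.web_tls; not part of this commit):

  # hypothetical second attachment, assuming a "web_tls" backend set exists
  load_balancers {
    backend_set_name = oci_load_balancer_backend_set.web_tls.name
    load_balancer_id = oci_load_balancer.web.id
    port             = 443
    vnic_selection   = "primaryvnic"
  }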
@@ -70,8 +77,9 @@ resource "oci_core_instance_configuration" "web" {
      }

      agent_config {
-        is_management_disabled = false
-        is_monitoring_disabled = false
+        are_all_plugins_disabled = true
+        is_management_disabled   = true
+        is_monitoring_disabled   = true
      }
      launch_options {
        network_type = "PARAVIRTUALIZED"
@@ -90,10 +98,10 @@ resource "oci_core_instance_configuration" "web" {
  }
}

-data "oci_core_instance_pool_instances" "web" {
-  compartment_id   = var.compartment_ocid
-  instance_pool_id = oci_core_instance_pool.web.id
-}
+# data "oci_core_instance_pool_instances" "web" {
+#   compartment_id   = var.compartment_ocid
+#   instance_pool_id = oci_core_instance_pool.web.id
+# }

# locals {
#   lbv4_web_instances = local.lbv4_web_enable && length(data.oci_core_instance_pool_instances.web.instances) > 0
@@ -104,32 +112,32 @@ data "oci_core_instance_pool_instances" "web" {
#   vnic_id = data.oci_core_vnic_attachments.contolplane[count.index].vnic_attachments[0]["vnic_id"]
# }

-resource "oci_network_load_balancer_backend" "web_http" {
-  for_each = local.lbv4_web_enable ? { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id } : {}
+# resource "oci_network_load_balancer_backend" "web_http" {
+#   for_each = local.lbv4_web_enable ? { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id } : {}

-  backend_set_name         = oci_network_load_balancer_backend_set.web_http[0].name
-  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.web[0].id
-  port                     = 80
+#   backend_set_name         = oci_network_load_balancer_backend_set.web_http[0].name
+#   network_load_balancer_id = oci_network_load_balancer_network_load_balancer.web[0].id
+#   port                     = 80

-  name      = "web-http-lb"
-  target_id = each.value
+#   name      = "web-http-lb"
+#   target_id = each.value

-  depends_on = [
-    oci_core_instance_pool.web
-  ]
-}
+#   depends_on = [
+#     oci_core_instance_pool.web
+#   ]
+# }

-resource "oci_network_load_balancer_backend" "web_https" {
-  for_each = local.lbv4_web_enable ? { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id } : {}
+# resource "oci_network_load_balancer_backend" "web_https" {
+#   for_each = local.lbv4_web_enable ? { for instances in data.oci_core_instance_pool_instances.web.instances.* : instances.display_name => instances.id } : {}

-  backend_set_name         = oci_network_load_balancer_backend_set.web_https[0].name
-  network_load_balancer_id = oci_network_load_balancer_network_load_balancer.web[0].id
-  port                     = 443
+#   backend_set_name         = oci_network_load_balancer_backend_set.web_https[0].name
+#   network_load_balancer_id = oci_network_load_balancer_network_load_balancer.web[0].id
+#   port                     = 443

-  name      = "web-https-lb"
-  target_id = each.value
+#   name      = "web-https-lb"
+#   target_id = each.value

-  depends_on = [
-    oci_core_instance_pool.web
-  ]
-}
+#   depends_on = [
+#     oci_core_instance_pool.web
+#   ]
+# }
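These per-instance NLB backends become redundant once the pool registers itself through the load_balancers block above; commenting them out avoids managing one oci_network_load_balancer_backend per instance. For reference, a sketch of the map the for_each expression built (instance names and OCIDs hypothetical):

locals {
  # shape of the for_each map, assuming two running pool instances
  web_backends_example = {
    "web-1" = "ocid1.instance.oc1..example1"   # display_name => instance id
    "web-2" = "ocid1.instance.oc1..example2"
  }
}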
@@ -1,90 +1,90 @@

-resource "oci_core_instance_pool" "worker" {
-  compartment_id            = var.compartment_ocid
-  instance_configuration_id = oci_core_instance_configuration.worker.id
-  size                      = lookup(var.instances[local.zone], "worker_count", 0)
-  state                     = "RUNNING"
-  display_name              = "${var.project}-worker"
+# resource "oci_core_instance_pool" "worker" {
+#   compartment_id            = var.compartment_ocid
+#   instance_configuration_id = oci_core_instance_configuration.worker.id
+#   size                      = lookup(var.instances[local.zone], "worker_count", 0)
+#   state                     = "RUNNING"
+#   display_name              = "${var.project}-worker"

-  placement_configurations {
-    availability_domain = local.network_private[local.zone].availability_domain
-    fault_domains       = data.oci_identity_fault_domains.domains.fault_domains.*.name
-    primary_subnet_id   = local.network_private[local.zone].id
-  }
+#   placement_configurations {
+#     availability_domain = local.network_private[local.zone].availability_domain
+#     fault_domains       = data.oci_identity_fault_domains.domains.fault_domains.*.name
+#     primary_subnet_id   = local.network_private[local.zone].id
+#   }

-  lifecycle {
-    ignore_changes = [
-      state,
-      defined_tags
-    ]
-  }
-}
+#   lifecycle {
+#     ignore_changes = [
+#       state,
+#       defined_tags
+#     ]
+#   }
+# }

-locals {
-  worker_labels = "topology.kubernetes.io/region=${var.region},topology.kubernetes.io/zone=${local.zone_label},project.io/node-pool=worker"
-}
+# locals {
+#   worker_labels = "topology.kubernetes.io/region=${var.region},topology.kubernetes.io/zone=${local.zone_label},project.io/node-pool=worker"
+# }

-resource "oci_core_instance_configuration" "worker" {
-  compartment_id = var.compartment_ocid
-  display_name   = "${var.project}-worker"
+# resource "oci_core_instance_configuration" "worker" {
+#   compartment_id = var.compartment_ocid
+#   display_name   = "${var.project}-worker"

-  instance_details {
-    instance_type = "compute"
+#   instance_details {
+#     instance_type = "compute"

-    launch_details {
-      compartment_id                      = var.compartment_ocid
-      display_name                        = "${var.project}-worker"
-      is_pv_encryption_in_transit_enabled = true
-      preferred_maintenance_action        = "LIVE_MIGRATE"
-      launch_mode                         = "NATIVE"
+#     launch_details {
+#       compartment_id                      = var.compartment_ocid
+#       display_name                        = "${var.project}-worker"
+#       is_pv_encryption_in_transit_enabled = true
+#       preferred_maintenance_action        = "LIVE_MIGRATE"
+#       launch_mode                         = "NATIVE"

-      shape = lookup(var.instances[local.zone], "worker_instance_shape", "VM.Standard.E2.1.Micro")
-      shape_config {
-        ocpus         = lookup(var.instances[local.zone], "worker_instance_ocpus", 1)
-        memory_in_gbs = lookup(var.instances[local.zone], "worker_instance_memgb", 1)
-      }
+#       shape = lookup(var.instances[local.zone], "worker_instance_shape", "VM.Standard.E2.1.Micro")
+#       shape_config {
+#         ocpus         = lookup(var.instances[local.zone], "worker_instance_ocpus", 1)
+#         memory_in_gbs = lookup(var.instances[local.zone], "worker_instance_memgb", 1)
+#       }

-      metadata = {
-        user_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
-          merge(var.kubernetes, {
-            lbv4        = local.lbv4_local
-            clusterDns  = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
-            nodeSubnets = local.network_private[local.zone].cidr_block
-            labels      = local.worker_labels
-          })
-        ))
-      }
+#       metadata = {
+#         user_data = base64encode(templatefile("${path.module}/templates/worker.yaml.tpl",
+#           merge(var.kubernetes, {
+#             lbv4        = local.lbv4_local
+#             clusterDns  = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
+#             nodeSubnets = local.network_private[local.zone].cidr_block
+#             labels      = local.worker_labels
+#           })
+#         ))
+#       }

-      source_details {
-        source_type             = "image"
-        image_id                = data.oci_core_images.talos_x64.images[0].id
-        boot_volume_size_in_gbs = "50"
-      }
-      create_vnic_details {
-        display_name              = "${var.project}-worker"
-        assign_private_dns_record = false
-        assign_public_ip          = false
-        nsg_ids                   = [local.nsg_talos, local.nsg_cilium, local.nsg_worker]
-        subnet_id                 = local.network_private[local.zone].id
-      }
+#       source_details {
+#         source_type             = "image"
+#         image_id                = data.oci_core_images.talos_x64.images[0].id
+#         boot_volume_size_in_gbs = "50"
+#       }
+#       create_vnic_details {
+#         display_name              = "${var.project}-worker"
+#         assign_private_dns_record = false
+#         assign_public_ip          = false
+#         nsg_ids                   = [local.nsg_talos, local.nsg_cilium, local.nsg_worker]
+#         subnet_id                 = local.network_private[local.zone].id
+#       }

-      agent_config {
-        is_management_disabled = false
-        is_monitoring_disabled = false
-      }
-      launch_options {
-        network_type = "PARAVIRTUALIZED"
-      }
-      instance_options {
-        are_legacy_imds_endpoints_disabled = true
-      }
-      availability_config {
-        recovery_action = "RESTORE_INSTANCE"
-      }
-    }
-  }
+#       agent_config {
+#         is_management_disabled = false
+#         is_monitoring_disabled = false
+#       }
+#       launch_options {
+#         network_type = "PARAVIRTUALIZED"
+#       }
+#       instance_options {
+#         are_legacy_imds_endpoints_disabled = true
+#       }
+#       availability_config {
+#         recovery_action = "RESTORE_INSTANCE"
+#       }
+#     }
+#   }

-  lifecycle {
-    create_before_destroy = "true"
-  }
-}
+#   lifecycle {
+#     create_before_destroy = "true"
+#   }
+# }
oracle/network-lb-l7.tf (new file, 35 lines)
@@ -0,0 +1,35 @@

resource "oci_load_balancer" "web" {
  compartment_id = var.compartment_ocid
  display_name   = "${local.project}-web-lb-l7"
  shape          = "flexible"
  shape_details {
    maximum_bandwidth_in_mbps = 10
    minimum_bandwidth_in_mbps = 10
  }

  subnet_ids                 = [local.network_lb.id]
  network_security_group_ids = [local.nsg_web]
}

resource "oci_load_balancer_listener" "web_http" {
  load_balancer_id         = oci_load_balancer.web.id
  name                     = "${local.project}-web-http"
  default_backend_set_name = oci_load_balancer_backend_set.web.name
  port                     = 80
  protocol                 = "HTTP"
}

resource "oci_load_balancer_backend_set" "web" {
  name             = "${local.project}-web-lb-l7"
  load_balancer_id = oci_load_balancer.web.id
  policy           = "ROUND_ROBIN"

  health_checker {
    retries     = 2
    protocol    = "HTTP"
    port        = 80
    url_path    = "/healthz"
    return_code = 200
  }
}
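The listener serves plain HTTP on port 80, and the backend set health-checks GET /healthz expecting a 200, so whatever the pool instances run must answer on that path. A small sketch for surfacing the provisioned address (hypothetical output, not part of this commit):

output "web_lbv4" {
  description = "First address of the L7 web load balancer"
  value       = oci_load_balancer.web.ip_addresses[0]
}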
@@ -1,11 +1,11 @@

locals {
-  lbv4_enable = false
+  lbv4_enable = true
  lbv4        = local.lbv4_enable ? [for ip in oci_network_load_balancer_network_load_balancer.contolplane[0].ip_addresses : ip.ip_address if ip.is_public][0] : "127.0.0.1"
  lbv4_local  = local.lbv4_enable ? [for ip in oci_network_load_balancer_network_load_balancer.contolplane[0].ip_addresses : ip.ip_address if !ip.is_public][0] : cidrhost(local.network_public[local.zone].cidr_block, 11)

  lbv4_web_enable = false
-  lbv4_web        = local.lbv4_web_enable ? [for ip in oci_network_load_balancer_network_load_balancer.web[0].ip_addresses : ip.ip_address if ip.is_public][0] : "127.0.0.1"
+  lbv4_web        = local.lbv4_web_enable ? [for ip in oci_network_load_balancer_network_load_balancer.web[0].ip_addresses : ip.ip_address if ip.is_public][0] : oci_load_balancer.web.ip_addresses[0]
}

resource "oci_dns_rrset" "lbv4_local" {
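With lbv4_web_enable still false, lbv4_web now falls back to the L7 load balancer's first address instead of 127.0.0.1. For the cidrhost() fallback used by lbv4_local, a worked example under an assumed public subnet:

locals {
  # assuming a hypothetical public subnet of 10.0.0.0/24
  example_lbv4_local = cidrhost("10.0.0.0/24", 11)   # => "10.0.0.11"
}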
@@ -14,7 +14,7 @@ machine:
        node-labels: ${labels}
    clusterDNS:
      - 169.254.2.53
-      - 10.200.16.10
+      - ${clusterDns}
    nodeIP:
      validSubnets: ${format("%#v",split(",",nodeSubnets))}
  network:
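The kubelet's second resolver is no longer hard-coded: clusterDns is rendered from the service CIDR via cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10) in the instance configuration. A worked example with assumed inputs:

locals {
  # assuming a hypothetical serviceSubnets of "10.96.0.0/12" and nodeSubnets of "10.0.1.0/24"
  example_clusterDns   = cidrhost(split(",", "10.96.0.0/12")[0], 10)   # => "10.96.0.10"
  example_validSubnets = format("%#v", split(",", "10.0.1.0/24"))      # => ["10.0.1.0/24"]
}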