Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git, synced 2025-10-29 17:42:47 +00:00
Merge branch 'main' of github.com:sergelogvinov/terraform-talos into main
@@ -10,20 +10,22 @@ create-network: ## Create networks

 create-lb: ## Create loadbalancer
 	terraform init
-	terraform apply -auto-approve -target=output.controlplane_endpoint
+	terraform apply -auto-approve -target=output.controlplane_endpoint -target=oci_network_load_balancer_network_load_balancer.contolplane
+	terraform apply -auto-approve -target=oci_network_load_balancer_network_load_balancer.web

 create-config: ## Generate talos configs
 	talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-oracle https://${ENDPOINT}:6443
 	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}

 create-templates:
 	@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
 	@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
 	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
 	@echo 'nodeSubnets: "172.16.0.0/12"' >> _cfgs/tfstate.vars
 	@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.id' _cfgs/controlplane.yaml | awk '{ print "clusterID: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.secret' _cfgs/controlplane.yaml | awk '{ print "clusterSecret: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.machine.token' _cfgs/controlplane.yaml | awk '{ print "tokenMachine: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.machine.ca.crt' _cfgs/controlplane.yaml | awk '{ print "caMachine: "$$1}' >> _cfgs/tfstate.vars
 	@yq eval '.cluster.token' _cfgs/controlplane.yaml | awk '{ print "token: "$$1}' >> _cfgs/tfstate.vars
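The `yq ea` recipe above is yq v4's multi-document deep merge: `ireduce` folds every input document into one, with later files overriding earlier keys (the doubled `$$` is only Makefile escaping for `$`). A minimal sketch on two throwaway files, names illustrative:

```sh
# Deep-merge two YAML files with yq v4; extra.yaml wins on key conflicts.
printf 'a: 1\nb:\n  c: 2\n' > base.yaml
printf 'b:\n  d: 3\n'       > extra.yaml
yq ea '. as $item ireduce ({}; . * $item)' base.yaml extra.yaml
# a: 1
# b:
#   c: 2
#   d: 3
```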
@@ -31,8 +33,14 @@ create-templates:

 	@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json

 create-controlplane: ## Bootstrap controlplane node
 	terraform apply -auto-approve -target=oci_core_instance.controlplane

 create-kubeconfig:
-	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.1.11 kubeconfig
+	talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.1.11 kubeconfig .

 create-deployments:
-	helm template --namespace=kube-system --version=1.11.0 -f deployments/cilium.yaml cilium cilium/cilium > deployments/cilium_result.yaml
+	helm template --namespace=kube-system --version=1.11.0 -f deployments/cilium.yaml cilium \
+		cilium/cilium > deployments/cilium_result.yaml
+	helm template --namespace=ingress-nginx --version=4.0.16 -f deployments/ingress.yaml ingress-nginx \
+		ingress-nginx/ingress-nginx > deployments/ingress_result.yaml
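The `helm template` calls resolve `cilium/cilium` and `ingress-nginx/ingress-nginx` against locally configured chart repositories, so the Makefile appears to assume a one-time setup along these lines (the URLs are the charts' publicly documented repositories):

```sh
# Assumed prerequisite for the create-deployments target.
helm repo add cilium https://helm.cilium.io/
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```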
@@ -30,6 +30,7 @@ data "oci_core_images" "talos_arm" {
 # }

 data "oci_identity_fault_domains" "domains" {
+  for_each            = { for idx, ad in local.zones : ad => idx }
   compartment_id      = var.compartment_ocid
-  availability_domain = local.network_public[local.zone].availability_domain
+  availability_domain = local.network_public[each.key].availability_domain
 }
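The added `for_each` creates one fault-domain lookup per availability domain, keyed by the AD name from `local.zones`. To see the shape of the map such an expression builds, `terraform console` can evaluate it over a stand-in list (the AD names below are made up):

```sh
# Hypothetical: evaluate the for_each map over a literal zone list.
echo '{ for idx, ad in ["AD-1", "AD-2"] : ad => idx }' | terraform console
# {
#   "AD-1" = 0
#   "AD-2" = 1
# }
```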
4	oracle/deployments/ingress-ns.yaml	Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
118	oracle/deployments/ingress.yaml	Normal file
@@ -0,0 +1,118 @@

controller:
  kind: DaemonSet

  hostNetwork: true
  hostPort:
    enabled: false
    ports:
      http: 80
      https: 443

  dnsPolicy: ClusterFirstWithHostNet

  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate

  publishService:
    enabled: false

  config:
    worker-processes: "auto"
    worker-cpu-affinity: "auto"
    error-log-level: "error"

    server-tokens: "false"
    http-redirect-code: "301"

    disable-ipv6-dns: "true"

    use-gzip: "true"
    use-geoip: "false"
    use-geoip2: "false"

    use-forwarded-headers: "true"
    # curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
    proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"

    enable-access-log-for-default-backend: "true"
    log-format-escape-json: "true"
    log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'

    upstream-keepalive-connections: "64"
    proxy-connect-timeout: "10"
    proxy-read-timeout: "120"
    proxy-send-timeout: "120"

    ssl-protocols: "TLSv1.2 TLSv1.3"

    http-snippet: |
      proxy_cache_path /tmp/static levels=1:2 use_temp_path=off keys_zone=static:16m inactive=24h max_size=512m;

    hsts: "true"
    hsts-max-age: "31536000"
    hsts-include-subdomains: "true"
    hsts-preload: "true"
    proxy-hide-headers: "strict-transport-security"
    proxy-headers-hash-bucket-size: "128"

    server-name-hash-bucket-size: "64"
    server-name-hash-max-size: "512"

    limit-req-status-code: "429"

    client-header-timeout: "30"
    client-body-timeout: "30"

  minReadySeconds: 15

  podAnnotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "10254"

  extraEnvs:
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name

  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 30
  readinessProbe:
    periodSeconds: 30

  resources:
    limits:
      cpu: 1
      memory: 1Gi
    requests:
      cpu: 100m
      memory: 128Mi

  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: project.io/node-pool
                operator: In
                values:
                  - web

  service:
    enabled: true
    type: ClusterIP
    clusterIP: None

  admissionWebhooks:
    enabled: false
  metrics:
    enabled: false

revisionHistoryLimit: 2

defaultBackend:
  enabled: false
440	oracle/deployments/ingress_result.yaml	Normal file
@@ -0,0 +1,440 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
  client-body-timeout: "30"
  client-header-timeout: "30"
  disable-ipv6-dns: "true"
  enable-access-log-for-default-backend: "true"
  error-log-level: "error"
  hsts: "true"
  hsts-include-subdomains: "true"
  hsts-max-age: "31536000"
  hsts-preload: "true"
  http-redirect-code: "301"
  http-snippet: "proxy_cache_path /tmp/static levels=1:2 use_temp_path=off keys_zone=static:16m inactive=24h max_size=512m;\n"
  limit-req-status-code: "429"
  log-format-escape-json: "true"
  log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
  proxy-connect-timeout: "10"
  proxy-headers-hash-bucket-size: "128"
  proxy-hide-headers: "strict-transport-security"
  proxy-read-timeout: "120"
  proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
  proxy-send-timeout: "120"
  server-name-hash-bucket-size: "64"
  server-name-hash-max-size: "512"
  server-tokens: "false"
  ssl-protocols: "TLSv1.2 TLSv1.3"
  upstream-keepalive-connections: "64"
  use-forwarded-headers: "true"
  use-geoip: "false"
  use-geoip2: "false"
  use-gzip: "true"
  worker-cpu-affinity: "auto"
  worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      - ingress-controller-leader
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: ClusterIP
  clusterIP: None
  ipFamilyPolicy: SingleStack
  ipFamilies:
    - IPv4
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
      appProtocol: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  revisionHistoryLimit: 2
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  minReadySeconds: 15
  template:
    metadata:
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: controller
          image: "k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de"
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --election-id=ingress-controller-leader
            - --controller-class=k8s.io/ingress-nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 30
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 30
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
          resources:
            limits:
              cpu: 1
              memory: 1Gi
            requests:
              cpu: 100m
              memory: 128Mi
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: project.io/node-pool
                    operator: In
                    values:
                      - web
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.16
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.1.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
@@ -28,7 +28,7 @@ resource "oci_core_image" "talos_amd64" {
   object_name = oci_objectstorage_object.talos_amd64.object

   operating_system         = "Talos"
-  operating_system_version = "0.14.0"
+  operating_system_version = "0.15.0"
   source_image_type        = "QCOW2"
 }
@@ -50,7 +50,7 @@ resource "oci_core_image" "talos_arm64" {
   object_name = oci_objectstorage_object.talos_arm64.object

   operating_system         = "Talos"
-  operating_system_version = "0.14.0"
+  operating_system_version = "0.15.0"
   source_image_type        = "QCOW2"
 }
@@ -4,7 +4,7 @@ variable "tenancy_ocid" {}
 variable "user_ocid" {}
 variable "fingerprint" {}
 variable "key_file" {
-  default = "~/.oci/oci_public.pem"
+  default = "~/.oci/oci_main_terraform_public.pem"
 }

 variable "region" {
@@ -3,7 +3,7 @@ terraform {
   required_providers {
     oci = {
       source  = "hashicorp/oci"
-      version = "4.56.0"
+      version = "4.61.0"
     }
   }
 }
5	oracle/init/Makefile	Normal file
@@ -0,0 +1,5 @@

init:
	terraform init
	terraform apply -target=null_resource.terraform_key -auto-approve
	terraform apply -auto-approve
@@ -7,7 +7,6 @@ provider "oci" {
   tenancy_ocid     = var.tenancy_ocid
   user_ocid        = var.user_ocid
   fingerprint      = var.fingerprint
-  private_key_path = "~/.oci/oci_api_key.pem"
-
-  region = var.region
+  private_key_path = var.key_file
+  region           = var.region
 }
@@ -2,6 +2,7 @@
 variable "tenancy_ocid" {}
 variable "user_ocid" {}
 variable "fingerprint" {}
+variable "key_file" {}

 variable "region" {
   description = "the OCI region where resources will be created"
   type        = string
@@ -3,7 +3,7 @@ terraform {
   required_providers {
     oci = {
       source  = "hashicorp/oci"
-      version = "4.57.0"
+      version = "4.61.0"
     }
   }
 }
@@ -11,7 +11,7 @@ resource "oci_core_ipv6" "contolplane" {
 }

 locals {
-  contolplane_labels = "topology.kubernetes.io/region=${var.region},topology.kubernetes.io/zone=${local.zone_label}"
+  contolplane_labels = "topology.kubernetes.io/region=${var.region}"
 }

 resource "oci_core_instance" "contolplane" {
@@ -20,8 +20,8 @@ resource "oci_core_instance" "contolplane" {
   compartment_id      = var.compartment_ocid
   display_name        = "${local.project}-contolplane-${count.index + 1}"
   defined_tags        = merge(var.tags, { "Kubernetes.Type" = "infra", "Kubernetes.Role" = "contolplane" })
-  availability_domain = local.zone
-  fault_domain        = element(data.oci_identity_fault_domains.domains.fault_domains, count.index).name
+  availability_domain = local.zones[count.index % local.zone_count]
+  fault_domain        = element(data.oci_identity_fault_domains.domains[element(local.zones, count.index)].fault_domains, floor(count.index / local.zone_count)).name

   shape = lookup(var.controlplane, "type", "VM.Standard.E4.Flex")
   shape_config {
@@ -35,7 +35,7 @@ resource "oci_core_instance" "contolplane" {
     name        = "contolplane-${count.index + 1}"
     lbv4        = local.lbv4
     lbv4_local  = local.lbv4_local
-    nodeSubnets = local.network_public[local.zone].cidr_block
+    nodeSubnets = local.network_public[element(local.zones, count.index)].cidr_block
     labels      = local.contolplane_labels
     ccm         = base64encode("useInstancePrincipals: true\nloadBalancer:\n disabled: true")
   })
@@ -49,8 +49,8 @@ resource "oci_core_instance" "contolplane" {
   }
   create_vnic_details {
     assign_public_ip = true
-    subnet_id        = local.network_public[local.zone].id
-    private_ip       = cidrhost(local.network_public[local.zone].cidr_block, 11 + count.index)
+    subnet_id        = local.network_public[element(local.zones, count.index)].id
+    private_ip       = cidrhost(local.network_public[element(local.zones, count.index)].cidr_block, 11 + floor(count.index / local.zone_count))
     nsg_ids          = [local.nsg_talos, local.nsg_cilium, local.nsg_contolplane]
   }

@@ -79,6 +79,7 @@ resource "oci_core_instance" "contolplane" {

   lifecycle {
     ignore_changes = [
+      fault_domain,
       shape_config,
       defined_tags,
       create_vnic_details["defined_tags"],
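With the new indexing, control-plane nodes spread round-robin across availability domains: `count.index % local.zone_count` (or the wrapping `element(...)`) selects the zone, while `floor(count.index / local.zone_count)` gives the per-zone ordinal added to the base host number 11 in `cidrhost`. A quick illustration of the arithmetic, using shell integer division (which already floors):

```sh
# Illustrative only: node index -> zone and cidrhost offset for 2 zones.
zone_count=2
for i in 0 1 2 3; do
  echo "node $i -> zone $((i % zone_count)), host offset $((11 + i / zone_count))"
done
# node 0 -> zone 0, host offset 11
# node 1 -> zone 1, host offset 11
# node 2 -> zone 0, host offset 12
# node 3 -> zone 1, host offset 12
```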
@@ -1,16 +1,17 @@

 resource "oci_core_instance_pool" "web" {
+  for_each                  = { for idx, ad in local.zones : ad => idx + 1 }
   compartment_id            = var.compartment_ocid
-  instance_configuration_id = oci_core_instance_configuration.web.id
-  size                      = lookup(var.instances[local.zone], "web_count", 0)
+  instance_configuration_id = oci_core_instance_configuration.web[each.key].id
+  size                      = lookup(var.instances[each.key], "web_count", 0)
   state                     = "RUNNING"
-  display_name              = "${var.project}-web"
+  display_name              = "${var.project}-web-${each.value}"
   defined_tags              = merge(var.tags, { "Kubernetes.Role" = "web" })

   placement_configurations {
-    availability_domain = local.network_public[local.zone].availability_domain
-    fault_domains       = data.oci_identity_fault_domains.domains.fault_domains.*.name
-    primary_subnet_id   = local.network_public[local.zone].id
+    availability_domain = local.network_public[each.key].availability_domain
+    fault_domains       = data.oci_identity_fault_domains.domains[each.key].fault_domains.*.name
+    primary_subnet_id   = local.network_public[each.key].id
   }

   load_balancers {
@@ -19,22 +20,30 @@ resource "oci_core_instance_pool" "web" {
     port             = 80
     vnic_selection   = "primaryvnic"
   }
+  load_balancers {
+    backend_set_name = oci_load_balancer_backend_set.webs.name
+    load_balancer_id = oci_load_balancer.web.id
+    port             = 443
+    vnic_selection   = "primaryvnic"
+  }

   lifecycle {
     ignore_changes = [
       state,
-      defined_tags
+      defined_tags,
+      load_balancers
     ]
   }
 }

 locals {
-  web_labels = "topology.kubernetes.io/region=${var.region},topology.kubernetes.io/zone=${local.zone_label},project.io/node-pool=web"
+  web_labels = "topology.kubernetes.io/region=${var.region},project.io/node-pool=web"
 }

 resource "oci_core_instance_configuration" "web" {
+  for_each       = { for idx, ad in local.zones : ad => idx + 1 }
   compartment_id = var.compartment_ocid
-  display_name   = "${var.project}-web"
+  display_name   = "${var.project}-web-${each.value}"
   defined_tags   = merge(var.tags, { "Kubernetes.Role" = "web" })

   instance_details {
@@ -47,10 +56,10 @@ resource "oci_core_instance_configuration" "web" {
       preferred_maintenance_action = "LIVE_MIGRATE"
       launch_mode                  = "NATIVE"

-      shape = lookup(var.instances[local.zone], "web_instance_shape", "VM.Standard.E2.1.Micro")
+      shape = lookup(var.instances[each.key], "web_instance_shape", "VM.Standard.E2.1.Micro")
       shape_config {
-        ocpus         = lookup(var.instances[local.zone], "web_instance_ocpus", 1)
-        memory_in_gbs = lookup(var.instances[local.zone], "web_instance_memgb", 1)
+        ocpus         = lookup(var.instances[each.key], "web_instance_ocpus", 1)
+        memory_in_gbs = lookup(var.instances[each.key], "web_instance_memgb", 1)
       }

       metadata = {
@@ -58,8 +67,8 @@ resource "oci_core_instance_configuration" "web" {
         merge(var.kubernetes, {
           lbv4        = local.lbv4_local
           clusterDns  = cidrhost(split(",", var.kubernetes["serviceSubnets"])[0], 10)
-          nodeSubnets = local.network_public[local.zone].cidr_block
-          labels      = local.web_labels
+          nodeSubnets = local.network_public[each.key].cidr_block
+          labels      = "${local.web_labels},topology.kubernetes.io/zone=${split(":", each.key)[1]}"
         })
       ))
@@ -74,7 +83,7 @@ resource "oci_core_instance_configuration" "web" {
       assign_private_dns_record = false
       assign_public_ip          = true
       nsg_ids                   = [local.nsg_talos, local.nsg_cilium, local.nsg_web]
-      subnet_id                 = local.network_public[local.zone].id
+      subnet_id                 = local.network_public[each.key].id
       skip_source_dest_check    = true
     }
@@ -24,7 +24,15 @@ resource "oci_load_balancer_listener" "web_http" {
   name                     = "${local.project}-web-http"
   default_backend_set_name = oci_load_balancer_backend_set.web.name
   port                     = 80
-  protocol                 = "HTTP"
+  protocol                 = "TCP"
 }

+resource "oci_load_balancer_listener" "web_https" {
+  load_balancer_id         = oci_load_balancer.web.id
+  name                     = "${local.project}-web-https"
+  default_backend_set_name = oci_load_balancer_backend_set.webs.name
+  port                     = 443
+  protocol                 = "TCP"
+}

 resource "oci_load_balancer_backend_set" "web" {
@@ -40,3 +48,17 @@ resource "oci_load_balancer_backend_set" "web" {
     return_code = 200
   }
 }

+resource "oci_load_balancer_backend_set" "webs" {
+  name             = "${local.project}-webs-lb-l7"
+  load_balancer_id = oci_load_balancer.web.id
+  policy           = "ROUND_ROBIN"
+
+  health_checker {
+    retries     = 2
+    protocol    = "HTTP"
+    port        = 80
+    url_path    = "/healthz"
+    return_code = 200
+  }
+}
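Both backend sets health-check `/healthz` on port 80, which the hostNetwork ingress-nginx controller on each `web` node is expected to answer. A spot-check sketch, with a placeholder for a real web-pool node address:

```sh
# <node-ip> is a placeholder: substitute a web-pool node's address.
curl -s -o /dev/null -w '%{http_code}\n' http://<node-ip>/healthz
# expect 200 once the controller pod on that node is ready
```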
@@ -2,7 +2,7 @@
 locals {
   lbv4_enable = true
   lbv4        = local.lbv4_enable ? [for ip in oci_network_load_balancer_network_load_balancer.contolplane[0].ip_addresses : ip.ip_address if ip.is_public][0] : "127.0.0.1"
-  lbv4_local  = local.lbv4_enable ? [for ip in oci_network_load_balancer_network_load_balancer.contolplane[0].ip_addresses : ip.ip_address if !ip.is_public][0] : cidrhost(local.network_public[local.zone].cidr_block, 11)
+  lbv4_local  = local.lbv4_enable ? [for ip in oci_network_load_balancer_network_load_balancer.contolplane[0].ip_addresses : ip.ip_address if !ip.is_public][0] : cidrhost(local.network_public[0].cidr_block, 11)

   lbv4_web_enable = false
   lbv4_web        = local.lbv4_web_enable ? [for ip in oci_network_load_balancer_network_load_balancer.web[0].ip_addresses : ip.ip_address if ip.is_public][0] : oci_load_balancer.web.ip_addresses[0]
@@ -342,7 +342,7 @@ resource "oci_core_network_security_group_security_rule" "web_kubelet" {
     }
   }
 }
-resource "oci_core_network_security_group_security_rule" "web_http_health_check" {
+resource "oci_core_network_security_group_security_rule" "web_http_lb" {
   for_each = toset([oci_core_vcn.main.cidr_block])

   network_security_group_id = oci_core_network_security_group.web.id
@@ -358,6 +358,23 @@ resource "oci_core_network_security_group_security_rule" "web_http_health_check"
     }
   }
 }
+resource "oci_core_network_security_group_security_rule" "web_https_lb" {
+  for_each = toset([oci_core_vcn.main.cidr_block])
+
+  network_security_group_id = oci_core_network_security_group.web.id
+  protocol                  = "6"
+  direction                 = "INGRESS"
+  source                    = each.value
+  stateless                 = false
+
+  tcp_options {
+    destination_port_range {
+      min = 443
+      max = 443
+    }
+  }
+}

 resource "oci_core_network_security_group_security_rule" "web_http_admin" {
   for_each = toset(var.whitelist_admins)

@@ -374,6 +391,22 @@ resource "oci_core_network_security_group_security_rule" "web_http_admin" {
     }
   }
 }
+resource "oci_core_network_security_group_security_rule" "web_https_admin" {
+  for_each = toset(var.whitelist_admins)
+
+  network_security_group_id = oci_core_network_security_group.web.id
+  protocol                  = "6"
+  direction                 = "INGRESS"
+  source                    = each.value
+  stateless                 = false
+
+  tcp_options {
+    destination_port_range {
+      min = 443
+      max = 443
+    }
+  }
+}
 resource "oci_core_network_security_group_security_rule" "web_http" {
   for_each = toset(var.whitelist_web)
@@ -135,8 +135,8 @@ resource "oci_core_subnet" "public" {
 resource "oci_core_subnet" "private" {
   for_each = { for idx, ad in local.zones : ad => idx }

-  cidr_block     = cidrsubnet(oci_core_vcn.main.cidr_block, 8, each.value + 4)
-  ipv6cidr_block = cidrsubnet(oci_core_vcn.main.ipv6cidr_blocks[0], 8, each.value + 11)
+  cidr_block     = cidrsubnet(oci_core_vcn.main.cidr_block, 8, each.value + 8)
+  ipv6cidr_block = cidrsubnet(oci_core_vcn.main.ipv6cidr_blocks[0], 8, each.value + 16)
   compartment_id = var.compartment_ocid
   vcn_id         = oci_core_vcn.main.id
   route_table_id = oci_core_route_table.private.id
@@ -4,7 +4,7 @@ variable "tenancy_ocid" {}
 variable "user_ocid" {}
 variable "fingerprint" {}
 variable "key_file" {
-  default = "~/.oci/oci_public.pem"
+  default = "~/.oci/oci_main_terraform_public.pem"
 }

 variable "project" {
@@ -31,7 +31,6 @@ variable "kubernetes" {
   default = {
     podSubnets     = "10.32.0.0/12,fd40:10:32::/102"
     serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
-    nodeSubnets    = "192.168.0.0/16"
     domain         = "cluster.local"
     apiDomain      = "api.cluster.local"
     clusterName    = "talos-k8s-oracle"
@@ -34,6 +34,8 @@ machine:
     servers:
       - 169.254.169.254
 cluster:
+  id: ${clusterID}
+  secret: ${clusterSecret}
   controlPlane:
     endpoint: https://${lbv4_local}:6443
   network:
@@ -75,3 +77,5 @@ cluster:
     - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/oracle/deployments/kubelet-serving-cert-approver.yaml
     - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/oracle/deployments/metrics-server.yaml
     - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/oracle/deployments/local-path-storage.yaml
+    - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/oracle/deployments/ingress-ns.yaml
+    - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/oracle/deployments/ingress_result.yaml
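Talos downloads these extra manifests over HTTPS during bootstrap, so a pre-flight reachability check on the newly added URLs can save a debugging round-trip (a simple sketch):

```sh
# Optional pre-flight: confirm the new manifest URLs are fetchable.
base=https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/oracle/deployments
for f in ingress-ns.yaml ingress_result.yaml; do
  curl -fsSL "$base/$f" >/dev/null && echo "$f ok"
done
```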
@@ -45,6 +45,8 @@ machine:
     servers:
       - 169.254.169.254
 cluster:
+  id: ${clusterID}
+  secret: ${clusterSecret}
   controlPlane:
     endpoint: https://${lbv4}:6443
   clusterName: ${clusterName}
@@ -41,6 +41,8 @@ machine:
     servers:
       - 169.254.169.254
 cluster:
+  id: ${clusterID}
+  secret: ${clusterSecret}
   controlPlane:
     endpoint: https://${lbv4}:6443
   clusterName: ${clusterName}
@@ -34,9 +34,10 @@ data "terraform_remote_state" "prepare" {
 }

 locals {
-  project    = data.terraform_remote_state.prepare.outputs.project
-  zone       = data.terraform_remote_state.prepare.outputs.zones[0]
-  zone_label = split(":", local.zone)[1]
+  project    = data.terraform_remote_state.prepare.outputs.project
+  zones      = data.terraform_remote_state.prepare.outputs.zones
+  zone_count = length(local.zones)

   dns_zone_id = data.terraform_remote_state.prepare.outputs.dns_zone_id

   network_lb = data.terraform_remote_state.prepare.outputs.network_lb
@@ -55,11 +56,12 @@ variable "kubernetes" {
   type = map(string)
   default = {
     podSubnets     = "10.32.0.0/12,fd40:10:32::/102"
-    serviceSubnets = "10.200.0.0/22,fd40:10:200::/112"
-    nodeSubnets    = "192.168.0.0/16"
+    serviceSubnets = "10.200.0.0/22,fd40:10:200::/112",
     domain         = "cluster.local"
     apiDomain      = "api.cluster.local"
     clusterName    = "talos-k8s-oracle"
+    clusterID      = ""
+    clusterSecret  = ""
     tokenMachine   = ""
     caMachine      = ""
     token          = ""
@@ -31,7 +31,7 @@ variable "kubernetes" {
     nodeSubnets    = "192.168.0.0/16"
     domain         = "cluster.local"
     apiDomain      = "api.cluster.local"
-    clusterName    = "talos-k8s-hetzner"
+    clusterName    = "talos-k8s-scaleway"
     clusterID      = ""
     clusterSecret  = ""
     tokenMachine   = ""