Mirror of https://github.com/optim-enterprises-bv/terraform-talos.git, synced 2025-10-29 01:22:29 +00:00

Commit: remove modules
@@ -21,6 +21,7 @@ output "network" {
    nat     = try(azurerm_public_ip.nat[region].ip_address, "")
    dns     = try(azurerm_private_dns_zone.main[0].name, "")
    peering = try(azurerm_linux_virtual_machine.router[region].private_ip_addresses, [])
    cidr    = azurerm_virtual_network.main[region].address_space
  } }
}
@@ -47,6 +47,7 @@ machine:
%{endif}
  time:
    servers:
      - 2.europe.pool.ntp.org
      - time.cloudflare.com
  install:
    wipe: false
@@ -1,4 +1,6 @@

CLUSTERNAME := "talos-k8s-openstack"
CPFIRST  := ${shell terraform output -raw controlplane_endpoint 2>/dev/null}
ENDPOINT := ${shell terraform output -raw controlplane_endpoint_public 2>/dev/null}
ifeq ($(ENDPOINT),)
ENDPOINT := 127.0.0.1

@@ -13,14 +15,12 @@ create-lb: ## Create load balancer
	terraform refresh

create-config: ## Generate talos configs
	talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false talos-k8s-openstack https://${ENDPOINT}:6443
	talosctl gen config --output-dir _cfgs --with-docs=false --with-examples=false ${CLUSTERNAME} https://${ENDPOINT}:6443
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}

create-templates:
	@yq ea -P '. as $$item ireduce ({}; . * $$item )' _cfgs/controlplane.yaml templates/controlplane.yaml.tpl > templates/controlplane.yaml
	@echo 'podSubnets: "10.32.0.0/12,fd00:10:32::/102"' > _cfgs/tfstate.vars
	@echo 'serviceSubnets: "10.200.0.0/22,fd40:10:200::/112"' >> _cfgs/tfstate.vars
	@echo 'nodeSubnets: "172.16.0.0/12"' >> _cfgs/tfstate.vars
	@echo 'apiDomain: api.cluster.local' >> _cfgs/tfstate.vars
	@yq eval '.cluster.network.dnsDomain' _cfgs/controlplane.yaml | awk '{ print "domain: "$$1}' >> _cfgs/tfstate.vars
	@yq eval '.cluster.clusterName' _cfgs/controlplane.yaml | awk '{ print "clusterName: "$$1}' >> _cfgs/tfstate.vars

@@ -33,26 +33,27 @@ create-templates:

	@yq eval -o=json '{"kubernetes": .}' _cfgs/tfstate.vars > terraform.tfvars.json

create-controlplane-bootstrap:
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
	talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} bootstrap

create-deployments:
	helm template --namespace=kube-system --version=1.12.7 -f deployments/cilium.yaml cilium \
		cilium/cilium > deployments/cilium-result.yaml
	helm template --namespace=ingress-nginx --version=4.4.0 -f deployments/ingress.yaml ingress-nginx \
		ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
	helm template --namespace=kube-system --version=2.27.1 -f deployments/openstack-cloud-controller-manager.yaml openstack-cloud-controller-manager \
		cpo/openstack-cloud-controller-manager > deployments/openstack-cloud-controller-manager-result.yaml

	helm template --namespace=kube-system --version=2.27.1 -f deployments/openstack-csi.yaml openstack-cloud-controller-manager \
		cpo/openstack-cinder-csi > deployments/openstack-cinder-csi-result.yaml

create-network: ## Create networks
	cd prepare && terraform init && terraform apply -auto-approve

create-controlplane-bootstrap:
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
	talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} bootstrap

create-controlplane: ## Bootstrap controlplane
	terraform apply -target=module.controlplane
	talosctl --talosconfig _cfgs/talosconfig config endpoint ${ENDPOINT}
	talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} bootstrap

create-kubeconfig: ## Download kubeconfig
	talosctl --talosconfig _cfgs/talosconfig --nodes ${ENDPOINT} kubeconfig .
	talosctl --talosconfig _cfgs/talosconfig --nodes ${CPFIRST} kubeconfig .
	kubectl --kubeconfig=kubeconfig config set clusters.talos-k8s-openstack.server https://${ENDPOINT}:6443
	kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
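Taken together, the targets above imply a bootstrap order. A minimal usage sketch (assuming the prepare/ Terraform stack and the _cfgs/ output directory referenced by this Makefile; all target names are the ones defined above):

    # 1. create networks, then generate Talos configs and templates
    make create-network
    make create-config
    make create-templates
    # 2. apply the controlplane module and bootstrap etcd on the first node
    make create-controlplane
    # 3. fetch the kubeconfig and render the addon manifests
    make create-kubeconfig
    make create-deployments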
@@ -1,889 +0,0 @@
---
# Source: cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:

  # Identity allocation mode selects how identities are shared between cilium
  # nodes by setting how they are stored. The options are "crd" or "kvstore".
  # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
  #   These can be queried with:
  #     kubectl get ciliumid
  # - "kvstore" stores identities in an etcd kvstore, that is
  #   configured below. Cilium versions before 1.6 supported only the kvstore
  #   backend. Upgrades from these older cilium versions should continue using
  #   the kvstore by commenting out the identity-allocation-mode below, or
  #   setting it to "kvstore".
  identity-allocation-mode: crd
  cilium-endpoint-gc-interval: "5m0s"
  nodes-gc-interval: "5m0s"
  skip-cnp-status-startup-clean: "false"
  # Disable the usage of CiliumEndpoint CRD
  disable-endpoint-crd: "false"

  # If you want to run cilium in debug mode change this value to true
  debug: "false"
  # The agent can be put into the following three policy enforcement modes:
  # default, always and never.
  # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
  enable-policy: "default"
  # If you want metrics enabled in all of your Cilium agents, set the port for
  # which the Cilium agents will have their metrics exposed.
  # This option deprecates the "prometheus-serve-addr" in the
  # "cilium-metrics-config" ConfigMap
  # NOTE that this will open the port on ALL nodes where Cilium pods are
  # scheduled.
  prometheus-serve-addr: ":9962"
  # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
  # field is not set.
  proxy-prometheus-port: "9964"

  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
  # address.
  enable-ipv4: "true"

  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
  # address.
  enable-ipv6: "true"
  # Users who wish to specify their own custom CNI configuration file must set
  # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
  custom-cni-conf: "false"
  enable-bpf-clock-probe: "true"
  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the fewer packets
  # will be seen in monitor output.
  monitor-aggregation: medium

  # The monitor aggregation interval governs the typical time between monitor
  # notification events for each allowed connection.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-interval: 5s

  # The monitor aggregation flags determine which TCP flags, upon the
  # first observation, cause monitor notifications to be generated.
  #
  # Only effective when monitor aggregation is set to "medium" or higher.
  monitor-aggregation-flags: all
  # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
  # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
  bpf-map-dynamic-size-ratio: "0.0025"
  # bpf-policy-map-max specifies the maximum number of entries in endpoint
  # policy map (per endpoint)
  bpf-policy-map-max: "16384"
  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
  # backend and affinity maps.
  bpf-lb-map-max: "65536"
  # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass
  # optimization for nodeport reverse NAT handling.
  bpf-lb-external-clusterip: "false"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # As a result, reply packets may be dropped and the load-balancing decisions
  # for established connections may change.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "false"

  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"

  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: default
  # Unique ID of the cluster. Must be unique across all connected clusters and
  # in the range of 1 to 255. Only relevant when building a mesh of clusters.
  cluster-id: "0"

  # Encapsulation mode for communication between nodes
  # Possible values:
  #   - disabled
  #   - vxlan (default)
  #   - geneve
  tunnel: "vxlan"
  # Enables L7 proxy for L7 policy enforcement and visibility
  enable-l7-proxy: "true"

  enable-ipv4-masquerade: "true"
  enable-ipv6-masquerade: "true"
  enable-bpf-masquerade: "false"

  enable-xt-socket-fallback: "true"
  install-iptables-rules: "true"
  install-no-conntrack-iptables-rules: "false"

  auto-direct-node-routes: "false"
  enable-local-redirect-policy: "true"
  enable-host-firewall: "true"
  # List of devices used to attach bpf_host.o (implements BPF NodePort,
  # host-firewall and BPF masquerading)
  devices: "eth+"

  kube-proxy-replacement: "strict"
  kube-proxy-replacement-healthz-bind-address: ""
  bpf-lb-sock: "false"
  host-reachable-services-protos:
  enable-health-check-nodeport: "true"
  node-port-bind-protection: "true"
  enable-auto-protect-node-port-range: "true"
  enable-svc-source-range-check: "true"
  enable-l2-neigh-discovery: "true"
  arping-refresh-period: "30s"
  k8s-require-ipv4-pod-cidr: "true"
  k8s-require-ipv6-pod-cidr: "true"
  enable-endpoint-health-checking: "true"
  enable-health-checking: "true"
  enable-well-known-identities: "false"
  enable-remote-node-identity: "true"
  synchronize-k8s-nodes: "true"
  operator-api-serve-addr: "127.0.0.1:9234"
  ipam: "kubernetes"
  disable-cnp-status-updates: "true"
  enable-vtep: "false"
  vtep-endpoint: ""
  vtep-cidr: ""
  vtep-mask: ""
  vtep-mac: ""
  enable-k8s-endpoint-slice: "true"
  enable-bgp-control-plane: "false"
  bpf-root: "/sys/fs/bpf"
  cgroup-root: "/sys/fs/cgroup"
  enable-k8s-terminating-endpoint: "true"
  remove-cilium-node-taints: "true"
  set-cilium-is-up-condition: "true"
  unmanaged-pod-watcher-interval: "15"
  tofqdns-dns-reject-response-code: "refused"
  tofqdns-enable-dns-compression: "true"
  tofqdns-endpoint-max-ip-per-hostname: "50"
  tofqdns-idle-connection-grace-period: "0s"
  tofqdns-max-deferred-connection-deletes: "10000"
  tofqdns-min-ttl: "3600"
  tofqdns-proxy-response-max-delay: "100ms"
  agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium
rules:
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  - services
  - pods
  - endpoints
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - list
  - watch
  # This is used when validating policies in preflight. This will need to stay
  # until we figure out how to avoid "get" inside the preflight, and then
  # should be removed ideally.
  - get
- apiGroups:
  - cilium.io
  resources:
  - ciliumbgploadbalancerippools
  - ciliumbgppeeringpolicies
  - ciliumclusterwideenvoyconfigs
  - ciliumclusterwidenetworkpolicies
  - ciliumegressgatewaypolicies
  - ciliumegressnatpolicies
  - ciliumendpoints
  - ciliumendpointslices
  - ciliumenvoyconfigs
  - ciliumidentities
  - ciliumlocalredirectpolicies
  - ciliumnetworkpolicies
  - ciliumnodes
  verbs:
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumidentities
  - ciliumendpoints
  - ciliumnodes
  verbs:
  - create
- apiGroups:
  - cilium.io
  # To synchronize garbage collection of such resources
  resources:
  - ciliumidentities
  verbs:
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpoints
  verbs:
  - delete
  - get
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes
  - ciliumnodes/status
  verbs:
  - get
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies/status
  - ciliumclusterwidenetworkpolicies/status
  - ciliumendpoints/status
  - ciliumendpoints
  verbs:
  - patch
---
# Source: cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  # to automatically delete [core|kube]dns pods so that they start being
  # managed by Cilium
  - delete
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # To remove node taints
  - nodes
  # To set NetworkUnavailable false on startup
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform LB IP allocation for BGP
  - services/status
  verbs:
  - update
- apiGroups:
  - ""
  resources:
  # to check apiserver connectivity
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
  - services
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumclusterwidenetworkpolicies
  verbs:
  # Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
  - create
  - update
  - deletecollection
  # To update the status of the CNPs and CCNPs
  - patch
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies/status
  - ciliumclusterwidenetworkpolicies/status
  verbs:
  # Update the auto-generated CNPs and CCNPs status.
  - patch
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpoints
  - ciliumidentities
  verbs:
  # To perform garbage collection of such resources
  - delete
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumidentities
  verbs:
  # To synchronize garbage collection of such resources
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes
  verbs:
  - create
  - update
  - get
  - list
  - watch
  # To perform CiliumNode garbage collection
  - delete
- apiGroups:
  - cilium.io
  resources:
  - ciliumnodes/status
  verbs:
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumendpointslices
  - ciliumenvoyconfigs
  verbs:
  - create
  - update
  - get
  - list
  - watch
  - delete
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - update
  resourceNames:
  - ciliumbgploadbalancerippools.cilium.io
  - ciliumbgppeeringpolicies.cilium.io
  - ciliumclusterwideenvoyconfigs.cilium.io
  - ciliumclusterwidenetworkpolicies.cilium.io
  - ciliumegressgatewaypolicies.cilium.io
  - ciliumegressnatpolicies.cilium.io
  - ciliumendpoints.cilium.io
  - ciliumendpointslices.cilium.io
  - ciliumenvoyconfigs.cilium.io
  - ciliumexternalworkloads.cilium.io
  - ciliumidentities.cilium.io
  - ciliumlocalredirectpolicies.cilium.io
  - ciliumnetworkpolicies.cilium.io
  - ciliumnodes.cilium.io
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - get
  - update
---
# Source: cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium
subjects:
- kind: ServiceAccount
  name: "cilium"
  namespace: kube-system
---
# Source: cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-operator
subjects:
- kind: ServiceAccount
  name: "cilium-operator"
  namespace: kube-system
---
# Source: cilium/templates/cilium-agent/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: cilium-agent
  namespace: kube-system
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9964"
  labels:
    k8s-app: cilium
spec:
  clusterIP: None
  type: ClusterIP
  selector:
    k8s-app: cilium
  ports:
  - name: envoy-metrics
    port: 9964
    protocol: TCP
    targetPort: envoy-metrics
---
# Source: cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium
  namespace: kube-system
  labels:
    k8s-app: cilium
spec:
  selector:
    matchLabels:
      k8s-app: cilium
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 2
    type: RollingUpdate
  template:
    metadata:
      annotations:
        prometheus.io/port: "9962"
        prometheus.io/scrape: "true"
      labels:
        k8s-app: cilium
    spec:
      containers:
      - name: cilium-agent
        image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-agent
        args:
        - --config-dir=/tmp/cilium/config-map
        startupProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          failureThreshold: 105
          periodSeconds: 2
          successThreshold: 1
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 10
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9879
            scheme: HTTP
            httpHeaders:
            - name: "brief"
              value: "true"
          periodSeconds: 30
          successThreshold: 1
          failureThreshold: 3
          timeoutSeconds: 5
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        - name: CILIUM_CNI_CHAINING_MODE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: cni-chaining-mode
              optional: true
        - name: CILIUM_CUSTOM_CNI_CONF
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: custom-cni-conf
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        lifecycle:
          postStart:
            exec:
              command:
              - "/cni-install.sh"
              - "--enable-debug=false"
              - "--cni-exclusive=true"
              - "--log-file=/var/run/cilium/cilium-cni.log"
          preStop:
            exec:
              command:
              - /cni-uninstall.sh
        resources:
          limits:
            cpu: 2
            memory: 1Gi
          requests:
            cpu: 100m
            memory: 128Mi
        ports:
        - name: peer-service
          containerPort: 4244
          hostPort: 4244
          protocol: TCP
        - name: prometheus
          containerPort: 9962
          hostPort: 9962
          protocol: TCP
        - name: envoy-metrics
          containerPort: 9964
          hostPort: 9964
          protocol: TCP
        securityContext:
          privileged: true
        terminationMessagePolicy: FallbackToLogsOnError
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
          mountPropagation: Bidirectional
        # Check for duplicate mounts before mounting
        - name: cilium-cgroup
          mountPath: /sys/fs/cgroup
        - name: cilium-run
          mountPath: /var/run/cilium
        - name: cni-path
          mountPath: /host/opt/cni/bin
        - name: etc-cni-netd
          mountPath: /host/etc/cni/net.d
        - name: clustermesh-secrets
          mountPath: /var/lib/cilium/clustermesh
          readOnly: true
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        # Needed to be able to load kernel modules
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: xtables-lock
          mountPath: /run/xtables.lock
      initContainers:
      - name: clean-cilium-state
        image: "quay.io/cilium/cilium:v1.12.7@sha256:8cb6b4742cc27b39e4f789d282a1fc2041decb6f5698bfe09112085a07b1fd61"
        imagePullPolicy: IfNotPresent
        command:
        - /init-container.sh
        env:
        - name: CILIUM_ALL_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-state
              optional: true
        - name: CILIUM_BPF_STATE
          valueFrom:
            configMapKeyRef:
              name: cilium-config
              key: clean-cilium-bpf-state
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          privileged: true
        volumeMounts:
        - name: bpf-maps
          mountPath: /sys/fs/bpf
        # Required to mount cgroup filesystem from the host to cilium agent pod
        - name: cilium-cgroup
          mountPath: /sys/fs/cgroup
          mountPropagation: HostToContainer
        - name: cilium-run
          mountPath: /var/run/cilium
        resources:
          requests:
            cpu: 100m
            memory: 100Mi # wait-for-kube-proxy
      restartPolicy: Always
      priorityClassName: system-node-critical
      serviceAccount: "cilium"
      serviceAccountName: "cilium"
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                k8s-app: cilium
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
      - operator: Exists
      volumes:
      # To keep state between restarts / upgrades
      - name: cilium-run
        hostPath:
          path: /var/run/cilium
          type: DirectoryOrCreate
      # To keep state between restarts / upgrades for bpf maps
      - name: bpf-maps
        hostPath:
          path: /sys/fs/bpf
          type: DirectoryOrCreate
      # To keep state between restarts / upgrades for cgroup2 filesystem
      - name: cilium-cgroup
        hostPath:
          path: /sys/fs/cgroup
          type: DirectoryOrCreate
      # To install cilium cni plugin in the host
      - name: cni-path
        hostPath:
          path: /opt/cni/bin
          type: DirectoryOrCreate
      # To install cilium cni configuration in the host
      - name: etc-cni-netd
        hostPath:
          path: /etc/cni/net.d
          type: DirectoryOrCreate
      # To be able to load kernel modules
      - name: lib-modules
        hostPath:
          path: /lib/modules
      # To access iptables concurrently with other processes (e.g. kube-proxy)
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        secret:
          secretName: cilium-clustermesh
          # note: the leading zero means this number is in octal representation: do not remove it
          defaultMode: 0400
          optional: true
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cilium-operator
  namespace: kube-system
  labels:
    io.cilium/app: operator
    name: cilium-operator
spec:
  # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
  # for more details.
  replicas: 1
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        # ensure pods roll when configmap updates
        cilium.io/cilium-configmap-checksum: "93ed3047796c548140dd014145d2cb313155de38c36595eb2f05f60856400ae5"
      labels:
        io.cilium/app: operator
        name: cilium-operator
    spec:
      containers:
      - name: cilium-operator
        image: "quay.io/cilium/operator-generic:v1.12.7@sha256:80f24810bf8484974c757382eb2c7408c9c024e5cb0719f4a56fba3f47695c72"
        imagePullPolicy: IfNotPresent
        command:
        - cilium-operator-generic
        args:
        - --config-dir=/tmp/cilium/config-map
        - --debug=$(CILIUM_DEBUG)
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        - name: KUBERNETES_SERVICE_HOST
          value: "api.cluster.local"
        - name: KUBERNETES_SERVICE_PORT
          value: "6443"
        livenessProbe:
          httpGet:
            host: "127.0.0.1"
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        volumeMounts:
        - name: cilium-config-path
          mountPath: /tmp/cilium/config-map
          readOnly: true
        terminationMessagePolicy: FallbackToLogsOnError
      hostNetwork: true
      restartPolicy: Always
      priorityClassName: system-cluster-critical
      serviceAccount: "cilium-operator"
      serviceAccountName: "cilium-operator"
      # In HA mode, cilium-operator pods must not be scheduled on the same
      # node as they will clash with each other.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                io.cilium/app: operator
            topologyKey: kubernetes.io/hostname
      nodeSelector:
        kubernetes.io/os: linux
        node-role.kubernetes.io/control-plane: ""
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      # To read the configuration from the config map
      - name: cilium-config-path
        configMap:
          name: cilium-config
@@ -1,77 +0,0 @@
---

k8sServiceHost: "api.cluster.local"
k8sServicePort: "6443"

operator:
  enabled: true
  rollOutPods: true
  replicas: 1
  prometheus:
    enabled: false
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - operator: Exists
    effect: NoSchedule

identityAllocationMode: crd
kubeProxyReplacement: strict
enableK8sEndpointSlice: true
localRedirectPolicy: true

tunnel: "vxlan"
autoDirectNodeRoutes: false
devices: [eth+]

healthChecking: true

cni:
  install: true

ipam:
  mode: "kubernetes"
k8s:
  requireIPv4PodCIDR: true
  requireIPv6PodCIDR: true

bpf:
  masquerade: false
ipv4:
  enabled: true
ipv6:
  enabled: true
hostServices:
  enabled: true
hostPort:
  enabled: true
nodePort:
  enabled: true
externalIPs:
  enabled: true
hostFirewall:
  enabled: true
ingressController:
  enabled: false

securityContext:
  privileged: true

hubble:
  enabled: false

prometheus:
  enabled: true

cgroup:
  autoMount:
    enabled: false
  hostRoot: /sys/fs/cgroup

resources:
  limits:
    cpu: 2
    memory: 1Gi
  requests:
    cpu: 100m
    memory: 128Mi
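The values file above is the input that the Makefile's create-deployments target feeds to helm template. A minimal standalone sketch of the same render (assuming the Cilium chart repo has been added under the name "cilium", e.g. via helm repo add; the repo URL is the upstream default, not stated in this commit):

    helm repo add cilium https://helm.cilium.io
    helm template --namespace=kube-system --version=1.12.7 \
      -f deployments/cilium.yaml cilium cilium/cilium > deployments/cilium-result.yaml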
@@ -1,153 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns-local
  namespace: kube-system
data:
  empty.db: |
    @ 60 IN SOA localnet. root.localnet. (
          1   ; serial
          60  ; refresh
          60  ; retry
          60  ; expiry
          60 ) ; minimum
    ;
    @ IN NS localnet.

  hosts: |
    # static hosts
    169.254.2.53 dns.local

  Corefile.local: |
    (empty) {
        file /etc/coredns/empty.db
    }

    .:53 {
        errors
        bind 169.254.2.53

        health 127.0.0.1:8091 {
            lameduck 5s
        }

        hosts /etc/coredns/hosts {
            reload 60s
            fallthrough
        }

        kubernetes cluster.local in-addr.arpa ip6.arpa {
            endpoint https://api.cluster.local:6443
            kubeconfig /etc/coredns/kubeconfig.conf coredns
            pods insecure
            ttl 60
        }
        prometheus :9153

        forward . /etc/resolv.conf {
            policy sequential
            expire 30s
        }

        cache 300
        loop
        reload
        loadbalance
    }
  kubeconfig.conf: |-
    apiVersion: v1
    kind: Config
    clusters:
    - cluster:
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        server: https://api.cluster.local:6443
      name: default
    contexts:
    - context:
        cluster: default
        namespace: kube-system
        user: coredns
      name: coredns
    current-context: coredns
    users:
    - name: coredns
      user:
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: coredns-local
  namespace: kube-system
  labels:
    k8s-app: kube-dns-local
    kubernetes.io/name: CoreDNS
spec:
  updateStrategy:
    type: RollingUpdate
  minReadySeconds: 15
  selector:
    matchLabels:
      k8s-app: kube-dns-local
      kubernetes.io/name: CoreDNS
  template:
    metadata:
      labels:
        k8s-app: kube-dns-local
        kubernetes.io/name: CoreDNS
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9153"
    spec:
      priorityClassName: system-node-critical
      serviceAccount: coredns
      serviceAccountName: coredns
      enableServiceLinks: false
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - effect: NoSchedule
        key: node.cloudprovider.kubernetes.io/uninitialized
        operator: Exists
      hostNetwork: true
      containers:
      - name: coredns
        image: coredns/coredns:1.9.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 100m
            memory: 128Mi
          requests:
            cpu: 50m
            memory: 64Mi
        args: [ "-conf", "/etc/coredns/Corefile.local" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        livenessProbe:
          httpGet:
            host: 127.0.0.1
            path: /health
            port: 8091
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns-local
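The Corefile above binds this second CoreDNS instance to the link-local address 169.254.2.53 on each node, with a health endpoint on 127.0.0.1:8091. A rough way to verify it from a node, once deployed (hypothetical invocations; assumes curl and dig are available on the host):

    # health endpoint declared in the Corefile
    curl -s http://127.0.0.1:8091/health
    # query the local listener for a name served by the kubernetes plugin
    dig @169.254.2.53 kubernetes.default.svc.cluster.local +short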
@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
@@ -1,483 +0,0 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
  client-body-timeout: "30"
  client-header-timeout: "30"
  enable-access-log-for-default-backend: "true"
  error-log-level: "error"
  hsts: "true"
  hsts-include-subdomains: "true"
  hsts-max-age: "31536000"
  hsts-preload: "true"
  http-redirect-code: "301"
  limit-req-status-code: "429"
  log-format-escape-json: "true"
  log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
  proxy-connect-timeout: "10"
  proxy-headers-hash-bucket-size: "128"
  proxy-hide-headers: "strict-transport-security"
  proxy-read-timeout: "60"
  proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"
  proxy-send-timeout: "60"
  server-name-hash-bucket-size: "64"
  server-name-hash-max-size: "512"
  server-tokens: "false"
  ssl-protocols: "TLSv1.3"
  upstream-keepalive-connections: "32"
  use-forwarded-headers: "true"
  use-geoip: "false"
  use-geoip2: "false"
  use-gzip: "true"
  worker-cpu-affinity: "auto"
  worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
# TODO(Jintao Zhang)
# Once we release a new version of the controller,
# we will be able to remove the configmap related permissions
# We have used the Lease API for selection
# ref: https://github.com/kubernetes/ingress-nginx/pull/8921
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames:
  - ingress-nginx-leader
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  resourceNames:
  - ingress-nginx-leader
  verbs:
  - get
  - update
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: ClusterIP
  clusterIP: None
  ipFamilyPolicy: RequireDualStack
  ipFamilies:
  - IPv4
  - IPv6
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
    appProtocol: http
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
    appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  revisionHistoryLimit: 2
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  minReadySeconds: 15
  template:
    metadata:
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: controller
        image: "registry.k8s.io/ingress-nginx/controller:v1.5.1@sha256:4ba73c697770664c1e00e9f968de14e08f606ff961c76e5d7033a4a9c593c629"
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        args:
        - /nginx-ingress-controller
        - --election-id=ingress-nginx-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
          runAsUser: 101
          allowPrivilegeEscalation: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 15
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 1
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 1
        ports:
        - name: http
          containerPort: 80
          protocol: TCP
        - name: https
          containerPort: 443
          protocol: TCP
        resources:
          limits:
            cpu: 1
            memory: 1Gi
          requests:
            cpu: 100m
            memory: 128Mi
      hostNetwork: true
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: project.io/node-pool
                operator: In
                values:
                - web
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding are required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.4.0
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: "1.5.1"
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
@@ -1,116 +0,0 @@

controller:
  kind: DaemonSet

  hostNetwork: true
  hostPort:
    enabled: false
    ports:
      http: 80
      https: 443

  dnsPolicy: ClusterFirstWithHostNet

  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate

  publishService:
    enabled: false

  config:
    worker-processes: "auto"
    worker-cpu-affinity: "auto"
    error-log-level: "error"

    server-tokens: "false"
    http-redirect-code: "301"

    use-gzip: "true"
    use-geoip: "false"
    use-geoip2: "false"

    use-forwarded-headers: "true"
    # curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
    proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14,172.16.0.0/12"

    enable-access-log-for-default-backend: "true"
    log-format-escape-json: "true"
    log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'

    upstream-keepalive-connections: "32"
    proxy-connect-timeout: "10"
    proxy-read-timeout: "60"
    proxy-send-timeout: "60"

    ssl-protocols: "TLSv1.3"
    hsts: "true"
    hsts-max-age: "31536000"
    hsts-include-subdomains: "true"
    hsts-preload: "true"
    proxy-hide-headers: "strict-transport-security"
    proxy-headers-hash-bucket-size: "128"

    server-name-hash-bucket-size: "64"
    server-name-hash-max-size: "512"

    limit-req-status-code: "429"

    client-header-timeout: "30"
    client-body-timeout: "30"

  minReadySeconds: 15

  podAnnotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "10254"

  extraEnvs:
  - name: NODE_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name

  livenessProbe:
    initialDelaySeconds: 15
    periodSeconds: 30
  readinessProbe:
    periodSeconds: 30

  resources:
    limits:
      cpu: 1
      memory: 1Gi
    requests:
      cpu: 100m
      memory: 128Mi

  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: project.io/node-pool
            operator: In
            values:
            - web

  service:
    enabled: true
    type: ClusterIP
    clusterIP: None
    ipFamilyPolicy: "RequireDualStack"
    ipFamilies:
    - IPv4
    - IPv6

  admissionWebhooks:
    enabled: false
  metrics:
    enabled: false

revisionHistoryLimit: 2

defaultBackend:
  enabled: false
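Because these values run the controller as a hostNetwork DaemonSet behind a headless ClusterIP service, there is no cloud load balancer to watch for readiness. A quick post-deploy sanity check could look like this (hypothetical invocations; the DaemonSet and namespace names come from the rendered manifest above):

    kubectl -n ingress-nginx get daemonset ingress-nginx-controller
    kubectl -n ingress-nginx get pods -o wide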
@@ -1,231 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: kubelet-serving-cert-approver
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: kubelet-serving-cert-approver
  namespace: kubelet-serving-cert-approver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: certificates:kubelet-serving-cert-approver
rules:
- apiGroups:
  - certificates.k8s.io
  resources:
  - certificatesigningrequests
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - certificates.k8s.io
  resources:
  - certificatesigningrequests/approval
  verbs:
  - update
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
- apiGroups:
  - certificates.k8s.io
  resourceNames:
  - kubernetes.io/kubelet-serving
  resources:
  - signers
  verbs:
  - approve
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: events:kubelet-serving-cert-approver
rules:
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: psp:kubelet-serving-cert-approver
rules:
- apiGroups:
  - policy
  resourceNames:
  - kubelet-serving-cert-approver
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: events:kubelet-serving-cert-approver
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: events:kubelet-serving-cert-approver
subjects:
- kind: ServiceAccount
  name: kubelet-serving-cert-approver
  namespace: kubelet-serving-cert-approver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: psp:kubelet-serving-cert-approver
  namespace: kubelet-serving-cert-approver
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:kubelet-serving-cert-approver
subjects:
- kind: ServiceAccount
  name: kubelet-serving-cert-approver
  namespace: kubelet-serving-cert-approver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: kubelet-serving-cert-approver
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: certificates:kubelet-serving-cert-approver
subjects:
- kind: ServiceAccount
  name: kubelet-serving-cert-approver
  namespace: kubelet-serving-cert-approver
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: kubelet-serving-cert-approver
  namespace: kubelet-serving-cert-approver
spec:
  ports:
  - name: metrics
    port: 9090
    protocol: TCP
    targetPort: metrics
  selector:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/instance: kubelet-serving-cert-approver
    app.kubernetes.io/name: kubelet-serving-cert-approver
  name: kubelet-serving-cert-approver
  namespace: kubelet-serving-cert-approver
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: kubelet-serving-cert-approver
      app.kubernetes.io/name: kubelet-serving-cert-approver
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: kubelet-serving-cert-approver
        app.kubernetes.io/name: kubelet-serving-cert-approver
    spec:
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - effect: NoSchedule
        key: node.cloudprovider.kubernetes.io/uninitialized
        operator: Exists
      containers:
      - args:
        - serve
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: ghcr.io/alex1989hu/kubelet-serving-cert-approver:main
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /healthz
            port: health
          initialDelaySeconds: 6
        name: cert-approver
        ports:
        - containerPort: 8080
          name: health
        - containerPort: 9090
          name: metrics
        readinessProbe:
          httpGet:
            path: /readyz
            port: health
          initialDelaySeconds: 3
        resources:
          limits:
            cpu: 250m
            memory: 32Mi
          requests:
            cpu: 10m
            memory: 16Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          privileged: false
          readOnlyRootFilesystem: true
          runAsNonRoot: true
      priorityClassName: system-cluster-critical
      securityContext:
        fsGroup: 65534
        runAsGroup: 65534
        runAsUser: 65534
      serviceAccountName: kubelet-serving-cert-approver
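Worth noting: the approver above is RBAC-scoped to the `kubernetes.io/kubelet-serving` signer only. A quick way to watch it work (a sketch, assuming `kubectl` access and that the deployment is running):

```shell
# New kubelet serving CSRs should flip to Approved,Issued on their own,
# without a manual "kubectl certificate approve".
kubectl get csr --field-selector spec.signerName=kubernetes.io/kubelet-serving
kubectl -n kubelet-serving-cert-approver logs deploy/kubelet-serving-cert-approver
```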
@@ -1,140 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
- apiGroups: [ "" ]
  resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
  verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
  resources: [ "endpoints", "persistentvolumes", "pods" ]
  verbs: [ "*" ]
- apiGroups: [ "" ]
  resources: [ "events" ]
  verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
  resources: [ "storageclasses" ]
  verbs: [ "get", "list", "watch" ]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
      tolerations:
      - key: "node-role.kubernetes.io/control-plane"
        effect: NoSchedule
      serviceAccountName: local-path-provisioner-service-account
      containers:
      - name: local-path-provisioner
        image: rancher/local-path-provisioner:v0.0.23
        imagePullPolicy: IfNotPresent
        command:
        - local-path-provisioner
        - --debug
        - start
        - --config
        - /etc/config/config.json
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: config-volume
        configMap:
          name: local-path-config

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/var/data"]
        }
      ]
    }
  setup: |-
    #!/bin/sh
    set -eu
    mkdir -m 0777 -p "$VOL_DIR"
  teardown: |-
    #!/bin/sh
    set -eu
    rm -rf "$VOL_DIR"
  helperPod.yaml: |-
    apiVersion: v1
    kind: Pod
    metadata:
      name: helper-pod
    spec:
      priorityClassName: system-node-critical
      tolerations:
      - key: node.kubernetes.io/disk-pressure
        operator: Exists
        effect: NoSchedule
      containers:
      - name: helper-pod
        image: busybox
        imagePullPolicy: IfNotPresent
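Because the `local-path` class above is the default and binds with `WaitForFirstConsumer`, a claim stays Pending until a pod that uses it is scheduled. A minimal sketch (the claim name and size are illustrative, not from this repo):

```shell
# PVC against the local-path class; it binds only once a consuming pod lands
# on a node, and the volume is backed by /var/data on that node.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-test
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: local-path
  resources:
    requests:
      storage: 1Gi
EOF
```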
@@ -1,197 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      nodeSelector:
        kubernetes.io/os: linux
        node-role.kubernetes.io/control-plane: ""
      tolerations:
      - key: "node-role.kubernetes.io/control-plane"
        effect: NoSchedule
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=6443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --authorization-always-allow-paths=/metrics
        image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 6443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
openstack/deployments/openstack-cinder-csi-ns.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Namespace
metadata:
  name: csi-cinder
  labels:
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/audit: baseline
    pod-security.kubernetes.io/audit-version: latest
    pod-security.kubernetes.io/warn: baseline
    pod-security.kubernetes.io/warn-version: latest
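For reference, a quick check that the Pod Security labels landed as intended (a sketch, assuming `kubectl` access to the cluster):

```shell
# Apply the namespace and read the pod-security.kubernetes.io labels back.
kubectl apply -f openstack/deployments/openstack-cinder-csi-ns.yaml
kubectl get namespace csi-cinder --show-labels
```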
openstack/deployments/openstack-cinder-csi-result.yaml (new file, 535 lines)
@@ -0,0 +1,535 @@
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
# This YAML file contains RBAC API objects,
# which are necessary to run csi controller plugin

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-cinder-controller-sa
  namespace: kube-system
---
# Source: openstack-cinder-csi/templates/nodeplugin-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-cinder-node-sa
  namespace: kube-system
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
# external Provisioner
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-role
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  # Secret permission is optional.
  # Enable it if your driver needs secret.
  # For example, `csi.storage.k8s.io/snapshotter-secret-name` is set in VolumeSnapshotClass.
  # See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html for more details.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
# Source: openstack-cinder-csi/templates/nodeplugin-rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-nodeplugin-role
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-attacher-role
  apiGroup: rbac.authorization.k8s.io
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-provisioner-role
  apiGroup: rbac.authorization.k8s.io
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-snapshotter-role
  apiGroup: rbac.authorization.k8s.io
---
# Source: openstack-cinder-csi/templates/controllerplugin-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-resizer-role
  apiGroup: rbac.authorization.k8s.io
---
# Source: openstack-cinder-csi/templates/nodeplugin-rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-nodeplugin-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-node-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-nodeplugin-role
  apiGroup: rbac.authorization.k8s.io
---
# Source: openstack-cinder-csi/templates/nodeplugin-daemonset.yaml
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: openstack-cinder-csi-nodeplugin
  namespace: kube-system
  labels:
    component: nodeplugin
    app: openstack-cinder-csi
    release: openstack-cloud-controller-manager
    chart: openstack-cinder-csi-2.27.1
    heritage: Helm
spec:
  selector:
    matchLabels:
      component: nodeplugin
      app: openstack-cinder-csi
      release: openstack-cloud-controller-manager
  template:
    metadata:
      labels:
        component: nodeplugin
        app: openstack-cinder-csi
        release: openstack-cloud-controller-manager
        chart: openstack-cinder-csi-2.27.1
        heritage: Helm
    spec:
      serviceAccount: csi-cinder-node-sa
      hostNetwork: true
      containers:
        - name: node-driver-registrar
          image: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.2"
          imagePullPolicy: IfNotPresent
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: DRIVER_REG_SOCK_PATH
              value: /var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          resources:
            {}
        - name: liveness-probe
          image: "registry.k8s.io/sig-storage/livenessprobe:v2.9.0"
          imagePullPolicy: IfNotPresent
          args:
            - "-v=2"
            - --csi-address=/csi/csi.sock
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          resources:
            {}
        - name: cinder-csi-plugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: "registry.k8s.io/provider-os/cinder-csi-plugin:v1.27.1"
          imagePullPolicy: IfNotPresent
          args:
            - /bin/cinder-csi-plugin
            - "-v=2"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--cloud-config=$(CLOUD_CONFIG)"
          env:
            - name: CSI_ENDPOINT
              value: unix://csi/csi.sock
            - name: CLOUD_CONFIG
              value: /etc/kubernetes/cloud.conf
          ports:
            - containerPort: 9808
              name: healthz
              protocol: TCP
          # The probe
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 60
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: kubelet-dir
              mountPath: /var/lib/kubelet
              mountPropagation: "Bidirectional"
            - name: pods-probe-dir
              mountPath: /dev
              mountPropagation: "HostToContainer"
            - mountPath: /etc/kubernetes
              name: cloud-config
              readOnly: true
          resources:
            {}
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/cinder.csi.openstack.org
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: kubelet-dir
          hostPath:
            path: /var/lib/kubelet
            type: Directory
        # - name: pods-cloud-data
        #   hostPath:
        #     path: /var/lib/cloud/data
        #     type: Directory
        - name: pods-probe-dir
          hostPath:
            path: /dev
            type: Directory
        - name: cloud-config
          secret:
            secretName: openstack-cloud-controller-manager
      affinity:
        {}
      nodeSelector:
        node.cloudprovider.kubernetes.io/platform: openstack
      tolerations:
        - operator: Exists
---
# Source: openstack-cinder-csi/templates/controllerplugin-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: openstack-cinder-csi-controllerplugin
  namespace: kube-system
  labels:
    component: controllerplugin
    app: openstack-cinder-csi
    release: openstack-cloud-controller-manager
    chart: openstack-cinder-csi-2.27.1
    heritage: Helm
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  selector:
    matchLabels:
      component: controllerplugin
      app: openstack-cinder-csi
      release: openstack-cloud-controller-manager
  template:
    metadata:
      labels:
        component: controllerplugin
        app: openstack-cinder-csi
        release: openstack-cloud-controller-manager
        chart: openstack-cinder-csi-2.27.1
        heritage: Helm
    spec:
      serviceAccount: csi-cinder-controller-sa
      containers:
        - name: csi-attacher
          image: "registry.k8s.io/sig-storage/csi-attacher:v4.2.0"
          imagePullPolicy: IfNotPresent
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--leader-election=true"
            - "--default-fstype=ext4"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
          resources:
            {}
        - name: csi-provisioner
          image: "registry.k8s.io/sig-storage/csi-provisioner:v3.4.1"
          imagePullPolicy: IfNotPresent
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--leader-election=true"
            - "--default-fstype=ext4"
            - "--feature-gates=Topology=true"
            - "--extra-create-metadata"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
          resources:
            {}
        - name: csi-snapshotter
          image: "registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1"
          imagePullPolicy: IfNotPresent
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          volumeMounts:
            - mountPath: /var/lib/csi/sockets/pluginproxy/
              name: socket-dir
          resources:
            {}
        - name: csi-resizer
          image: "registry.k8s.io/sig-storage/csi-resizer:v1.7.0"
          imagePullPolicy: IfNotPresent
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--handle-volume-inuse-error=false"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
          resources:
            {}
        - name: liveness-probe
          image: "registry.k8s.io/sig-storage/livenessprobe:v2.9.0"
          imagePullPolicy: IfNotPresent
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          volumeMounts:
            - mountPath: /var/lib/csi/sockets/pluginproxy/
              name: socket-dir
          resources:
            {}
        - name: cinder-csi-plugin
          image: "registry.k8s.io/provider-os/cinder-csi-plugin:v1.27.1"
          imagePullPolicy: IfNotPresent
          args:
            - /bin/cinder-csi-plugin
            - "-v=2"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--cloud-config=$(CLOUD_CONFIG)"
            - "--cluster=$(CLUSTER_NAME)"
          env:
            - name: CSI_ENDPOINT
              value: unix://csi/csi.sock
            - name: CLOUD_CONFIG
              value: /etc/kubernetes/cloud.conf
            - name: CLUSTER_NAME
              value: "kubernetes"
          ports:
            - containerPort: 9808
              name: healthz
              protocol: TCP
          # The probe
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 60
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /etc/kubernetes
              name: cloud-config
              readOnly: true
          resources:
            {}
      volumes:
        - name: socket-dir
          emptyDir:
        - name: cloud-config
          secret:
            secretName: openstack-cloud-controller-manager
      affinity:
        {}
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
        node.cloudprovider.kubernetes.io/platform: openstack
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
---
# Source: openstack-cinder-csi/templates/cinder-csi-driver.yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: cinder.csi.openstack.org
spec:
  attachRequired: true
  podInfoOnMount: true
  volumeLifecycleModes:
    - Persistent
    - Ephemeral
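The rendered chart registers the `cinder.csi.openstack.org` driver but, per the values later in this diff, ships no StorageClass (`storageClass.enabled: false`), so one has to be created separately. A minimal sketch (the class name and the expansion/binding settings are illustrative):

```shell
kubectl apply -f - <<'EOF'
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: cinder
provisioner: cinder.csi.openstack.org
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
EOF
```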
@@ -0,0 +1,197 @@
---
# Source: openstack-cloud-controller-manager/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: openstack-cloud-controller-manager
  namespace: kube-system
  annotations:
---
# Source: openstack-cloud-controller-manager/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:openstack-cloud-controller-manager
  annotations:
rules:
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - create
  - update
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - services/status
  verbs:
  - patch
- apiGroups:
  - ""
  resources:
  - serviceaccounts/token
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - create
  - get
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - get
  - list
  - watch
  - update
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - list
  - get
  - watch
---
# Source: openstack-cloud-controller-manager/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:openstack-cloud-controller-manager
  annotations:
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:openstack-cloud-controller-manager
subjects:
- kind: ServiceAccount
  name: openstack-cloud-controller-manager
  namespace: "kube-system"
---
# Source: openstack-cloud-controller-manager/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: openstack-cloud-controller-manager
  namespace: kube-system
  labels:
    app.kubernetes.io/name: openstack-cloud-controller-manager
    helm.sh/chart: openstack-cloud-controller-manager-2.28.0-alpha.6
    app.kubernetes.io/instance: openstack-cloud-controller-manager
    app.kubernetes.io/version: "v1.27.1"
    app.kubernetes.io/managed-by: Helm
  annotations:
spec:
  selector:
    matchLabels:
      component: controllermanager
      app: openstack-cloud-controller-manager
      release: openstack-cloud-controller-manager
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: bf42776b3c976e5762a25c440c7615361c3faf25106844e44870eb6ce9a9f4f1
      labels:
        component: controllermanager
        app: openstack-cloud-controller-manager
        release: openstack-cloud-controller-manager
        chart: openstack-cloud-controller-manager-2.28.0-alpha.6
        heritage: Helm
    spec:
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
        node.cloudprovider.kubernetes.io/platform: openstack
      securityContext:
        runAsNonRoot: true
        runAsUser: 1001
        seccompProfile:
          type: RuntimeDefault
      tolerations:
        - effect: NoSchedule
          key: node.cloudprovider.kubernetes.io/uninitialized
          value: "true"
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
      serviceAccountName: openstack-cloud-controller-manager
      containers:
        - name: openstack-cloud-controller-manager
          image: "registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.27.1"
          args:
            - /bin/openstack-cloud-controller-manager
            - --v=2
            - --cloud-config=$(CLOUD_CONFIG)
            - --cluster-name=$(CLUSTER_NAME)
            - --cloud-provider=openstack
            - --use-service-account-credentials=true
            - --controllers=cloud-node-lifecycle
            - --bind-address=127.0.0.1
            - --leader-elect-resource-name=cloud-controller-manager-openstack
          volumeMounts:
            - mountPath: /etc/config
              name: cloud-config-volume
              readOnly: true
          resources:
            requests:
              cpu: 100m
          env:
            - name: CLOUD_CONFIG
              value: /etc/config/cloud.conf
            - name: CLUSTER_NAME
              value: kubernetes
      hostNetwork: true
      volumes:
      - name: cloud-config-volume
        secret:
          secretName: openstack-cloud-controller-manager
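A quick smoke test for the rendered daemonset (a sketch, assuming `kubectl` access); the lease name matches the `--leader-elect-resource-name` flag above:

```shell
kubectl -n kube-system get pods -l app=openstack-cloud-controller-manager
kubectl -n kube-system get lease cloud-controller-manager-openstack
```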
@@ -1,192 +1,33 @@
apiVersion: v1
kind: ServiceAccount
metadata:

secret:
  enabled: true
  create: false
  name: openstack-cloud-controller-manager
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:openstack-cloud-controller-manager
rules:
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - create
  - update
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - services/status
  verbs:
  - patch
- apiGroups:
  - ""
  resources:
  - serviceaccounts/token
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - create
  - get
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - get
  - list
  - watch
  - update
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - list
  - get
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:openstack-cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:openstack-cloud-controller-manager
subjects:
- kind: ServiceAccount
  name: openstack-cloud-controller-manager
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: system:openstack-cloud-controller-manager:extension-apiserver-authentication-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: openstack-cloud-controller-manager
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openstack-cloud-controller-manager
  namespace: kube-system
  labels:
    tier: control-plane
    k8s-app: openstack-cloud-controller-manager
spec:
  selector:
    matchLabels:
      k8s-app: openstack-cloud-controller-manager
  template:
    metadata:
      labels:
        tier: control-plane
        k8s-app: openstack-cloud-controller-manager
    spec:
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
        node.cloudprovider.kubernetes.io/platform: openstack
      tolerations:
      - key: "node.cloudprovider.kubernetes.io/uninitialized"
        value: "true"
        effect: "NoSchedule"
      - key: "node-role.kubernetes.io/control-plane"
        effect: NoSchedule
      securityContext:
        seccompProfile:
          type: RuntimeDefault
        runAsUser: 1001
        runAsNonRoot: true
      priorityClassName: system-cluster-critical
      hostNetwork: true
      serviceAccountName: openstack-cloud-controller-manager
      containers:
      - name: cloud-controller-manager
        image: k8scloudprovider/openstack-cloud-controller-manager:v1.25.3
        # image: ghcr.io/sergelogvinov/openstack-cloud-controller-manager:v1.17.0-465-gc47f994
        command:
        - /bin/openstack-cloud-controller-manager
        args:
        - --v=2
        - --cluster-name=$(CLUSTER_NAME)
        - --cloud-config=/etc/config/cloud.conf
        - --cloud-provider=openstack
        - --allocate-node-cidrs=false
        - --controllers=cloud-node-lifecycle
        - --leader-elect-resource-name=cloud-controller-manager-openstack
        - --use-service-account-credentials
        - --bind-address=127.0.0.1
        env:
        - name: CLUSTER_NAME
          value: kubernetes
        volumeMounts:
        - mountPath: /etc/config
          name: cloud-config
          readOnly: true
        resources:
          requests:
            cpu: 100m
      volumes:
      - name: cloud-config
        secret:
          secretName: openstack-cloud-controller-manager

enabledControllers:
  - cloud-node-lifecycle
controllerExtraArgs: |-
  - --leader-elect-resource-name=openstack-cloud-controller-manager

resources:
  requests:
    cpu: 100m

podSecurityContext:
  seccompProfile:
    type: RuntimeDefault
  runAsUser: 1001
  runAsNonRoot: true

extraVolumes: []
extraVolumeMounts: []

nodeSelector:
  node-role.kubernetes.io/control-plane: ""
  node.cloudprovider.kubernetes.io/platform: openstack
tolerations:
  - key: "node.cloudprovider.kubernetes.io/uninitialized"
    value: "true"
    effect: "NoSchedule"
  - key: "node-role.kubernetes.io/control-plane"
    effect: NoSchedule
@@ -1,145 +0,0 @@
# This YAML defines all API objects to create RBAC roles for csi node plugin.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-cinder-node-sa
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-nodeplugin-role
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-nodeplugin-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-node-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-nodeplugin-role
  apiGroup: rbac.authorization.k8s.io
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects,
# which are necessary to run csi nodeplugin for cinder.

kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-cinder-nodeplugin
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: csi-cinder-nodeplugin
  template:
    metadata:
      labels:
        app: csi-cinder-nodeplugin
    spec:
      nodeSelector:
        node-role.kubernetes.io/control-plane: ""
        node.cloudprovider.kubernetes.io/platform: openstack
      tolerations:
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
      serviceAccount: csi-cinder-node-sa
      hostNetwork: true
      containers:
        - name: node-driver-registrar
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: DRIVER_REG_SOCK_PATH
              value: /var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: liveness-probe
          image: k8s.gcr.io/sig-storage/livenessprobe:v2.6.0
          args:
            - --csi-address=/csi/csi.sock
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: cinder-csi-plugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: docker.io/k8scloudprovider/cinder-csi-plugin:latest
          args:
            - /bin/cinder-csi-plugin
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--cloud-config=$(CLOUD_CONFIG)"
          env:
            - name: CSI_ENDPOINT
              value: unix://csi/csi.sock
            - name: CLOUD_CONFIG
              value: /etc/config/cloud.conf
          imagePullPolicy: "IfNotPresent"
          ports:
            - containerPort: 9808
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 10
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: kubelet-dir
              mountPath: /var/lib/kubelet
              mountPropagation: "Bidirectional"
            - name: pods-probe-dir
              mountPath: /dev
              mountPropagation: "HostToContainer"
            - name: secret-cinderplugin
              mountPath: /etc/config
              readOnly: true
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/cinder.csi.openstack.org
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: kubelet-dir
          hostPath:
            path: /var/lib/kubelet
            type: Directory
        - name: pods-probe-dir
          hostPath:
            path: /dev
            type: Directory
        - name: secret-cinderplugin
          secret:
            secretName: openstack-cloud-controller-manager
@@ -1,333 +1,33 @@
# This YAML file contains RBAC API objects,
# which are necessary to run csi controller plugin

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-cinder-controller-sa
  namespace: kube-system
secret:
  enabled: true
  create: false
  name: openstack-cloud-controller-manager

---
# external attacher
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
storageClass:
  enabled: false

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-attacher-role
  apiGroup: rbac.authorization.k8s.io
csi:
  plugin:
    # volumes: []
    # volumeMounts: []

---
# external Provisioner
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-provisioner-role
  apiGroup: rbac.authorization.k8s.io

---
# external snapshotter
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-role
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  # Secret permission is optional.
  # Enable it if your driver needs secret.
  # For example, `csi.storage.k8s.io/snapshotter-secret-name` is set in VolumeSnapshotClass.
  # See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html for more details.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-snapshotter-role
  apiGroup: rbac.authorization.k8s.io
---

# External Resizer
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-binding
subjects:
  - kind: ServiceAccount
    name: csi-cinder-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: csi-resizer-role
  apiGroup: rbac.authorization.k8s.io
---
# This YAML file contains CSI Controller Plugin Sidecars
# external-attacher, external-provisioner, external-snapshotter
# external-resize, liveness-probe

kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-cinder-controllerplugin
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 1
  selector:
    matchLabels:
      app: csi-cinder-controllerplugin
  template:
    metadata:
      labels:
        app: csi-cinder-controllerplugin
    spec:
nodePlugin:
  nodeSelector:
    node.cloudprovider.kubernetes.io/platform: openstack
  tolerations:
    - operator: Exists
controllerPlugin:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
    node.cloudprovider.kubernetes.io/platform: openstack
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      effect: NoSchedule
      serviceAccount: csi-cinder-controller-sa
      containers:
        - name: csi-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--default-fstype=ext4"
            - "--feature-gates=Topology=true"
            - "--extra-create-metadata"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        - name: csi-snapshotter
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
          args:
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--extra-create-metadata"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /var/lib/csi/sockets/pluginproxy/
              name: socket-dir
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--timeout=3m"
            - "--handle-volume-inuse-error=false"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
        - name: liveness-probe
          image: k8s.gcr.io/sig-storage/livenessprobe:v2.6.0
          args:
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          volumeMounts:
            - mountPath: /var/lib/csi/sockets/pluginproxy/
              name: socket-dir
        - name: cinder-csi-plugin
          image: docker.io/k8scloudprovider/cinder-csi-plugin:latest
          args:
            - /bin/cinder-csi-plugin
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--cloud-config=$(CLOUD_CONFIG)"
            - "--cluster=$(CLUSTER_NAME)"
          env:
            - name: CSI_ENDPOINT
              value: unix://csi/csi.sock
            - name: CLOUD_CONFIG
              value: /etc/config/cloud.conf
            - name: CLUSTER_NAME
              value: kubernetes
          imagePullPolicy: "IfNotPresent"
          ports:
            - containerPort: 9808
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 60
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: cloud-config
              mountPath: /etc/config
              readOnly: true
      volumes:
        - name: socket-dir
          emptyDir:
        - name: cloud-config
          secret:
            secretName: openstack-cloud-controller-manager
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: cinder.csi.openstack.org
spec:
  attachRequired: true
  podInfoOnMount: true
  volumeLifecycleModes:
    - Persistent
    - Ephemeral
@@ -19,6 +19,10 @@ spec:
        node.cloudprovider.kubernetes.io/platform: openstack
      tolerations:
        - operator: Exists
      securityContext:
        runAsUser: 0
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: statefulset-openstack
          image: ubuntu
@@ -10,7 +10,7 @@ regions = ["GRA7", "GRA9"]
```

```shell
wget https://github.com/siderolabs/talos/releases/download/v1.3.4/openstack-amd64.tar.gz
wget https://github.com/siderolabs/talos/releases/download/v1.4.6/openstack-amd64.tar.gz
tar -xzf openstack-amd64.tar.gz

terraform init && terraform apply -auto-approve
@@ -7,7 +7,7 @@ resource "openstack_images_image_v2" "talos" {
  disk_format     = "raw"
  min_disk_gb     = 5
  min_ram_mb      = 1
  tags            = ["talos-1.3.4"]
  tags            = ["talos-1.4.6"]

  properties = {
    hw_qemu_guest_agent = "no"

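The tag bump only takes effect if image lookups filter on it; a sketch of how a tag-based data source could select the new image (the `name` and filter values are assumptions, not taken from this repo):

```hcl
# Hypothetical lookup matching the bumped tag; adjust name/tag to the real upload.
data "openstack_images_image_v2" "talos_by_tag" {
  for_each    = { for idx, name in local.regions : name => idx }
  region      = each.key
  name        = "talos"       # assumed image name
  tag         = "talos-1.4.6" # selects the image tagged above
  most_recent = true
}
```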
@@ -3,8 +3,8 @@ terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.49.0"
      version = "~> 1.52.1"
    }
  }
  required_version = ">= 1.2"
  required_version = ">= 1.5"
}

@@ -1,4 +1,24 @@

locals {
  controlplane_prefix = "controlplane"

  controlplanes = { for k in flatten([
    for region in local.regions : [
      for inx in range(lookup(try(var.controlplane[region], {}), "count", 0)) : {
        name : "${local.controlplane_prefix}-${lower(region)}-${1 + inx}"
        region : region
        ip   = cidrhost(local.network_public[region].cidr, 11 + inx)
        vip  = cidrhost(local.network_public[region].cidr, 5)
        type : lookup(try(var.controlplane[region], {}), "type", "d2-2")
      }
    ]
  ]) : k.name => k }

  controlplane_lbv4 = { for region in local.regions :
    region => cidrhost(local.network_public[region].cidr, 5) if lookup(try(var.controlplane[region], {}), "count", 0) != 0
  }
}

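To make the `cidrhost` arithmetic above concrete: node addresses start at host 11 and the shared VIP sits at host 5 of each region's public CIDR. A self-contained worked example under an assumed CIDR (values illustrative only):

```hcl
# Worked example, assuming network_public cidr = "172.16.16.0/24" and count = 2.
locals {
  example_cidr = "172.16.16.0/24"
  example_ips  = [for inx in range(2) : cidrhost(local.example_cidr, 11 + inx)] # ["172.16.16.11", "172.16.16.12"]
  example_vip  = cidrhost(local.example_cidr, 5)                                # "172.16.16.5"
}
```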
resource "openstack_compute_servergroup_v2" "controlplane" {
|
||||
for_each = { for idx, name in local.regions : name => idx }
|
||||
region = each.key
|
||||
@@ -6,42 +26,107 @@ resource "openstack_compute_servergroup_v2" "controlplane" {
|
||||
policies = ["anti-affinity"]
|
||||
}
|
||||
|
||||
module "controlplane" {
|
||||
source = "./modules/controlplane"
|
||||
for_each = { for idx, name in local.regions : name => idx }
|
||||
region = each.key
|
||||
resource "openstack_networking_port_v2" "controlplane" {
|
||||
for_each = local.controlplanes
|
||||
region = each.value.region
|
||||
name = lower(each.value.name)
|
||||
network_id = local.network_public[each.value.region].network_id
|
||||
admin_state_up = true
|
||||
|
||||
instance_servergroup = openstack_compute_servergroup_v2.controlplane[each.key].id
|
||||
instance_count = lookup(try(var.controlplane[each.key], {}), "count", 0)
|
||||
instance_flavor = lookup(try(var.controlplane[each.key], {}), "type", "d2-2")
|
||||
instance_image = data.openstack_images_image_v2.talos[each.key].id
|
||||
instance_tags = concat(var.tags, ["infra"])
|
||||
instance_secgroups = [local.network_secgroup[each.key].common, local.network_secgroup[each.key].controlplane]
|
||||
instance_params = merge(var.kubernetes, {
|
||||
lbv4 = local.lbv4
|
||||
routes = "\n${join("\n", formatlist(" - network: %s", flatten([for zone in local.regions : local.network_subnets[zone]])))}"
|
||||
region = each.key
|
||||
auth = local.openstack_auth_url
|
||||
project_id = local.project_id
|
||||
project_domain_id = local.project_domain_id
|
||||
network_public_name = local.network_external[each.key].name
|
||||
port_security_enabled = false
|
||||
fixed_ip {
|
||||
subnet_id = local.network_public[each.value.region].subnet_id
|
||||
ip_address = each.value.ip
|
||||
}
|
||||
|
||||
occm = templatefile("${path.module}/deployments/openstack-cloud-controller-manager.conf.tpl", {
|
||||
username = var.ccm_username
|
||||
password = var.ccm_password
|
||||
region = each.key
|
||||
auth = local.openstack_auth_url
|
||||
project_id = local.project_id
|
||||
project_domain_id = local.project_domain_id
|
||||
network_public_name = local.network_external[each.key].name
|
||||
})
|
||||
})
|
||||
lifecycle {
|
||||
ignore_changes = [port_security_enabled]
|
||||
}
|
||||
}
|
||||
|
||||
network_internal = local.network_public[each.key]
|
||||
network_external = local.network_external[each.key]
|
||||
resource "openstack_networking_port_v2" "controlplane_public" {
|
||||
for_each = local.controlplanes
|
||||
region = each.value.region
|
||||
name = lower(each.value.name)
|
||||
network_id = local.network_external[each.value.region].id
|
||||
admin_state_up = true
|
||||
security_group_ids = [local.network_secgroup[each.value.region].common, local.network_secgroup[each.value.region].controlplane]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "controlplane" {
|
||||
for_each = local.controlplanes
|
||||
region = each.value.region
|
||||
name = each.value.name
|
||||
flavor_name = each.value.type
|
||||
tags = concat(var.tags, ["infra"])
|
||||
image_id = data.openstack_images_image_v2.talos[each.value.region].id
|
||||
|
||||
scheduler_hints {
|
||||
group = openstack_compute_servergroup_v2.controlplane[each.value.region].id
|
||||
}
|
||||
|
||||
stop_before_destroy = true
|
||||
|
||||
network {
|
||||
port = openstack_networking_port_v2.controlplane_public[each.key].id
|
||||
}
|
||||
network {
|
||||
port = openstack_networking_port_v2.controlplane[each.key].id
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [flavor_name, image_id, scheduler_hints, user_data]
|
||||
}
|
||||
}
|
||||
|
||||
locals {
  lbv4s    = compact([for c in module.controlplane : c.controlplane_lb])
  endpoint = [for ip in try(flatten([for c in module.controlplane : c.controlplane_endpoints]), []) : ip if length(split(".", ip)) > 1]
  ips      = flatten([for k, v in openstack_networking_port_v2.controlplane : v.all_fixed_ips])
  endpoint = flatten([for k, v in openstack_networking_port_v2.controlplane_public : v.all_fixed_ips])
}

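The `split` trick used on these lists filters address families: only a dotted IPv4 string splits on "." into more than one part, so later `one(...)` expressions pick the single IPv4 (or, with ":", the IPv6) out of a mixed list. A self-contained sketch with hypothetical addresses:

```hcl
locals {
  mixed_ips    = ["2001:db8::10", "203.0.113.10"] # hypothetical port addresses
  example_ipv4 = one([for ip in local.mixed_ips : ip if length(split(".", ip)) > 1]) # => "203.0.113.10"
  example_ipv6 = one([for ip in local.mixed_ips : ip if length(split(":", ip)) > 1]) # => "2001:db8::10"
}
```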
resource "local_sensitive_file" "controlplane" {
|
||||
for_each = local.controlplanes
|
||||
|
||||
content = templatefile("${path.module}/templates/controlplane.yaml.tpl",
|
||||
merge(var.kubernetes, {
|
||||
name = each.value.name
|
||||
labels = "topology.kubernetes.io/region=${each.value.region}"
|
||||
certSANs = flatten([
|
||||
var.kubernetes["apiDomain"],
|
||||
])
|
||||
|
||||
routes = "\n${join("\n", formatlist(" - network: %s", flatten([for zone in local.regions : local.network_subnets[zone]])))}"
|
||||
ipv4_local = each.value.ip
|
||||
ipv4_local_vip = each.value.vip
|
||||
ipv4 = one([for ip in openstack_networking_port_v2.controlplane_public[each.key].all_fixed_ips : ip if length(split(".", ip)) > 1])
|
||||
ipv6 = one([for ip in openstack_networking_port_v2.controlplane_public[each.key].all_fixed_ips : ip if length(split(":", ip)) > 1])
|
||||
nodeSubnets = split(",", local.network_public[each.value.region].cidr)
|
||||
|
||||
occm = templatefile("${path.module}/templates/openstack-cloud-controller-manager.conf.tpl", {
|
||||
username = var.ccm_username
|
||||
password = var.ccm_password
|
||||
region = each.value.region
|
||||
auth = local.openstack_auth_url
|
||||
project_id = local.project_id
|
||||
project_domain_id = local.project_domain_id
|
||||
network_public_name = local.network_external[each.value.region].name
|
||||
})
|
||||
})
|
||||
)
|
||||
filename = "_cfgs/${each.value.name}.yaml"
|
||||
file_permission = "0600"
|
||||
}
|
||||
|
||||
locals {
  bootstrap = [for k, v in local.controlplanes : "talosctl apply-config --insecure --nodes ${
    one([for ip in openstack_networking_port_v2.controlplane_public[k].all_fixed_ips : ip if length(split(".", ip)) > 1])
  } --config-patch @${local_sensitive_file.controlplane[k].filename} --file _cfgs/controlplane.yaml"]
}

output "bootstrap" {
  value = local.bootstrap
}

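`terraform output bootstrap` then prints one ready-to-run talosctl command per controlplane node; with a single node it would look roughly like this (address and filename are hypothetical):

```hcl
# Approximate shape of the rendered output value:
locals {
  bootstrap_example = [
    "talosctl apply-config --insecure --nodes 203.0.113.10 --config-patch @_cfgs/controlplane-gra7-1.yaml --file _cfgs/controlplane.yaml",
  ]
}
```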
# locals {
#   lbv4s = compact([for c in module.controlplane : c.controlplane_lb])
# }

@@ -6,25 +6,103 @@ resource "openstack_compute_servergroup_v2" "web" {
  policies = ["soft-anti-affinity"]
}

module "web" {
  source   = "./modules/worker"
  for_each = { for idx, name in local.regions : name => idx }
  region   = each.key
locals {
  web_prefix = "web"

  instance_servergroup = openstack_compute_servergroup_v2.web[each.key].id
  instance_count       = lookup(try(var.instances[each.key], {}), "web_count", 0)
  instance_name        = "web"
  instance_flavor      = lookup(try(var.instances[each.key], {}), "web_instance_type", 0)
  instance_image       = data.openstack_images_image_v2.talos[each.key].id
  instance_tags        = concat(var.tags, ["web"])
  instance_secgroups   = [local.network_secgroup[each.key].common, local.network_secgroup[each.key].web]
  instance_params = merge(var.kubernetes, {
    ipv4_local_network = local.network[each.key].cidr
    ipv4_local_gw      = local.network_public[each.key].gateway
    lbv4               = module.controlplane[each.key].controlplane_lb != "" ? module.controlplane[each.key].controlplane_lb : one(local.lbv4s)
    routes             = "${join("\n ", formatlist("- network: %s", flatten([for zone in local.regions : local.network_subnets[zone]])))}"
  })

  network_internal = local.network_public[each.key]
  network_external = local.network_external[each.key]
  web = { for k in flatten([
    for region in local.regions : [
      for inx in range(lookup(try(var.instances[region], {}), "web_count", 0)) : {
        name : "${local.web_prefix}-${lower(region)}-${1 + inx}"
        region : region
        ip   = cidrhost(local.network_public[region].cidr, 21 + inx)
        cidr = local.network_public[region].cidr
        lbv4 = try(local.controlplane_lbv4[region], one([for ip in local.controlplane_lbv4 : ip]))
        type : lookup(try(var.instances[region], {}), "web_type", "d2-2")
      }
    ]
  ]) : k.name => k }
}

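The `lbv4` fallback in the new locals prefers the region's own controlplane VIP and otherwise borrows the single VIP defined anywhere else; `one()` fails on purpose if that fallback is ambiguous. A sketch under assumed values:

```hcl
locals {
  vips_example = { "GRA7" = "172.16.16.5" } # hypothetical controlplane_lbv4 map
  # Region with its own VIP keeps it:
  lbv4_gra7 = try(local.vips_example["GRA7"], one([for ip in local.vips_example : ip])) # => "172.16.16.5"
  # Region without one falls back to the only VIP that exists:
  lbv4_gra9 = try(local.vips_example["GRA9"], one([for ip in local.vips_example : ip])) # => "172.16.16.5"
}
```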
resource "openstack_networking_port_v2" "web" {
|
||||
for_each = local.web
|
||||
region = each.value.region
|
||||
name = lower(each.value.name)
|
||||
network_id = local.network_public[each.value.region].network_id
|
||||
admin_state_up = true
|
||||
|
||||
port_security_enabled = false
|
||||
fixed_ip {
|
||||
subnet_id = local.network_public[each.value.region].subnet_id
|
||||
ip_address = each.value.ip
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
ignore_changes = [port_security_enabled]
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "web_public" {
|
||||
for_each = local.web
|
||||
region = each.value.region
|
||||
name = lower(each.value.name)
|
||||
admin_state_up = true
|
||||
network_id = local.network_external[each.value.region].id
|
||||
security_group_ids = [local.network_secgroup[each.value.region].common, local.network_secgroup[each.value.region].web]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "web" {
|
||||
for_each = local.web
|
||||
region = each.value.region
|
||||
name = each.value.name
|
||||
flavor_name = each.value.type
|
||||
tags = concat(var.tags, ["web"])
|
||||
image_id = data.openstack_images_image_v2.talos[each.value.region].id
|
||||
|
||||
scheduler_hints {
|
||||
group = openstack_compute_servergroup_v2.web[each.value.region].id
|
||||
}
|
||||
network {
|
||||
port = openstack_networking_port_v2.web_public[each.key].id
|
||||
}
|
||||
network {
|
||||
port = openstack_networking_port_v2.web[each.key].id
|
||||
}
|
||||
|
||||
user_data = templatefile("${path.module}/templates/worker.yaml.tpl",
|
||||
merge(var.kubernetes, {
|
||||
name = each.value.name
|
||||
labels = "topology.kubernetes.io/region=${each.value.region},project.io/node-pool=web"
|
||||
iface = "eth1"
|
||||
nodeSubnets = each.value.cidr
|
||||
lbv4 = each.value.lbv4
|
||||
routes = "\n${join("\n", formatlist(" - network: %s", flatten([for zone in local.regions : local.network_subnets[zone]])))}"
|
||||
})
|
||||
)
|
||||
|
||||
stop_before_destroy = true
|
||||
lifecycle {
|
||||
ignore_changes = [flavor_name, image_id, scheduler_hints, user_data]
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
web_endpoint = flatten([for k, v in openstack_networking_port_v2.web_public : v.all_fixed_ips])
|
||||
}
|
||||
|
||||
resource "local_file" "worker" {
|
||||
for_each = local.web
|
||||
|
||||
content = templatefile("${path.module}/templates/worker.yaml.tpl",
|
||||
merge(var.kubernetes, {
|
||||
name = each.value.name
|
||||
labels = "topology.kubernetes.io/region=${each.value.region},project.io/node-pool=web"
|
||||
iface = "eth1"
|
||||
nodeSubnets = each.value.cidr
|
||||
lbv4 = each.value.lbv4
|
||||
routes = "\n${join("\n", formatlist(" - network: %s", flatten([for zone in local.regions : local.network_subnets[zone]])))}"
|
||||
})
|
||||
)
|
||||
|
||||
filename = "_cfgs/${each.value.name}.yaml"
|
||||
file_permission = "0600"
|
||||
}
|
||||
|
||||
@@ -1,26 +1,89 @@

module "worker" {
  source = "./modules/worker"
resource "openstack_compute_servergroup_v2" "worker" {
  for_each = { for idx, name in local.regions : name => idx }
  region   = each.key
  name     = "worker"
  policies = ["soft-anti-affinity"]
}

  instance_count     = lookup(try(var.instances[each.key], {}), "worker_count", 0)
  instance_name      = "worker"
  instance_flavor    = lookup(try(var.instances[each.key], {}), "worker_instance_type", 0)
  instance_image     = data.openstack_images_image_v2.talos[each.key].id
  instance_tags      = concat(var.tags, ["worker"])
  instance_secgroups = [local.network_secgroup[each.key].common]
  instance_params = merge(var.kubernetes, {
    ipv4_local_network = local.network[each.key].cidr
    ipv4_local_gw      = local.network_private[each.key].gateway
    lbv4               = module.controlplane[each.key].controlplane_lb != "" ? module.controlplane[each.key].controlplane_lb : one(local.lbv4s)
    routes             = "${join("\n ", formatlist("- network: %s", flatten([for zone in local.regions : local.network_subnets[zone]])))}"
  })
locals {
  worker_prefix = "worker"

  network_internal = local.network_private[each.key]
  network_external = {
    id     = local.network_external[each.key].id
    subnet = local.network_external[each.key].subnets_v6[0]
    mtu    = local.network_external[each.key].mtu
  worker = { for k in flatten([
    for region in local.regions : [
      for inx in range(lookup(try(var.instances[region], {}), "worker_count", 0)) : {
        name : "${local.worker_prefix}-${lower(region)}-${1 + inx}"
        region : region
        ip   = cidrhost(local.network_private[region].cidr, 21 + inx)
        cidr = local.network_private[region].cidr
        lbv4 = try(local.controlplane_lbv4[region], one([for ip in local.controlplane_lbv4 : ip]))
        type : lookup(try(var.instances[region], {}), "worker_type", "d2-2")
      }
    ]
  ]) : k.name => k }
}

resource "openstack_networking_port_v2" "worker" {
  for_each       = local.worker
  region         = each.value.region
  name           = lower(each.value.name)
  network_id     = local.network_private[each.value.region].network_id
  admin_state_up = true

  port_security_enabled = false
  fixed_ip {
    subnet_id  = local.network_private[each.value.region].subnet_id
    ip_address = each.value.ip
  }

  lifecycle {
    ignore_changes = [port_security_enabled]
  }
}

resource "openstack_networking_port_v2" "worker_public" {
  for_each       = local.worker
  region         = each.value.region
  name           = lower(each.value.name)
  admin_state_up = true
  network_id     = local.network_external[each.value.region].id
  fixed_ip {
    subnet_id = one(local.network_external[each.value.region].subnets_v6)
  }
  security_group_ids = [local.network_secgroup[each.value.region].common]
}

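`one()` in the worker_public port both unwraps and asserts: it returns the element of a single-element collection, returns null for an empty one, and raises an error for two or more, so the plan fails fast if the external network ever grows a second IPv6 subnet. A sketch:

```hcl
locals {
  subnets_v6_example = ["subnet-v6-gra7"]             # hypothetical subnet ID list
  v6_subnet          = one(local.subnets_v6_example)  # => "subnet-v6-gra7"; null if empty, error if 2+
}
```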
resource "openstack_compute_instance_v2" "worker" {
|
||||
for_each = local.worker
|
||||
region = each.value.region
|
||||
name = each.value.name
|
||||
flavor_name = each.value.type
|
||||
tags = concat(var.tags, ["worker"])
|
||||
image_id = data.openstack_images_image_v2.talos[each.value.region].id
|
||||
|
||||
scheduler_hints {
|
||||
group = openstack_compute_servergroup_v2.worker[each.value.region].id
|
||||
}
|
||||
network {
|
||||
port = openstack_networking_port_v2.worker_public[each.key].id
|
||||
}
|
||||
network {
|
||||
port = openstack_networking_port_v2.worker[each.key].id
|
||||
}
|
||||
|
||||
user_data = templatefile("${path.module}/templates/worker.yaml.tpl",
|
||||
merge(var.kubernetes, {
|
||||
name = each.value.name
|
||||
labels = "topology.kubernetes.io/region=${each.value.region},project.io/node-pool=worker"
|
||||
iface = "eth1"
|
||||
nodeSubnets = each.value.cidr
|
||||
lbv4 = each.value.lbv4
|
||||
routes = "\n${join("\n", formatlist(" - network: %s", flatten([for zone in local.regions : local.network_subnets[zone]])))}"
|
||||
})
|
||||
)
|
||||
|
||||
stop_before_destroy = true
|
||||
lifecycle {
|
||||
ignore_changes = [flavor_name, image_id, scheduler_hints, user_data]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,82 +0,0 @@

resource "openstack_networking_port_v2" "controlplane" {
  count          = var.instance_count
  region         = var.region
  name           = "controlplane-${lower(var.region)}-${count.index + 1}"
  network_id     = var.network_internal.network_id
  admin_state_up = true

  port_security_enabled = false
  fixed_ip {
    subnet_id  = var.network_internal.subnet_id
    ip_address = cidrhost(var.network_internal.cidr, var.instance_ip_start + count.index)
  }

  lifecycle {
    ignore_changes = [port_security_enabled]
  }
}

resource "openstack_networking_port_v2" "controlplane_public" {
  count              = var.instance_count
  region             = var.region
  name               = "controlplane-${lower(var.region)}-${count.index + 1}"
  network_id         = var.network_external.id
  admin_state_up     = true
  security_group_ids = var.instance_secgroups
}

resource "openstack_compute_instance_v2" "controlplane" {
  count       = var.instance_count
  region      = var.region
  name        = "controlplane-${lower(var.region)}-${count.index + 1}"
  flavor_name = var.instance_flavor
  tags        = var.instance_tags
  image_id    = var.instance_image

  scheduler_hints {
    group = var.instance_servergroup
  }

  stop_before_destroy = true

  network {
    port = openstack_networking_port_v2.controlplane_public[count.index].id
  }
  network {
    port = openstack_networking_port_v2.controlplane[count.index].id
  }

  lifecycle {
    ignore_changes = [flavor_name, image_id, scheduler_hints, user_data]
  }
}

locals {
  ipv4_local     = var.instance_count > 0 ? [for ip in try(openstack_networking_port_v2.controlplane_public[0].all_fixed_ips, []) : ip if length(split(".", ip)) > 1][0] : ""
  ipv4_local_vip = var.instance_count > 0 ? cidrhost(var.network_internal.cidr, 5) : ""

  controlplane_labels = "topology.kubernetes.io/region=${var.region}"
}

resource "local_file" "controlplane" {
  count = var.instance_count

  content = templatefile("${path.module}/../../templates/controlplane.yaml",
    merge(var.instance_params, {
      name   = "controlplane-${lower(var.region)}-${count.index + 1}"
      type   = "controlplane"
      labels = local.controlplane_labels

      ipv4_local     = [for k in openstack_networking_port_v2.controlplane[count.index].all_fixed_ips : k if length(regexall("[0-9]+.[0-9.]+", k)) > 0][0]
      ipv4_local_vip = local.ipv4_local_vip

      ipv4 = [for k in openstack_networking_port_v2.controlplane_public[count.index].all_fixed_ips : k if length(regexall("[0-9]+.[0-9.]+", k)) > 0][0]
      ipv6 = [for k in openstack_networking_port_v2.controlplane_public[count.index].all_fixed_ips : k if length(regexall("[0-9a-z]+:[0-9a-z:]+", k)) > 0][0]

      nodeSubnets = split(",", var.network_internal.cidr)
    })
  )
  filename        = "_cfgs/controlplane-${lower(var.region)}-${count.index + 1}.yaml"
  file_permission = "0600"
}
@@ -1,17 +0,0 @@

output "controlplane_lb" {
  description = "Kubernetes controlplane local loadbalancer ip"
  value       = local.ipv4_local_vip
}

output "controlplane_endpoints" {
  description = "Kubernetes controlplane endpoint"
  value       = flatten([for ip in try(openstack_networking_port_v2.controlplane_public[*].all_fixed_ips, []) : ip])
  depends_on  = [openstack_networking_port_v2.controlplane_public]
}

output "controlplane_bootstrap" {
  description = "Kubernetes controlplane bootstrap command"
  value       = local.ipv4_local == "" ? "" : "talosctl apply-config --insecure --nodes ${local.ipv4_local} --file _cfgs/controlplane-${lower(var.region)}-1.yaml"
  depends_on  = [openstack_networking_port_v2.controlplane_public]
}
@@ -1,57 +0,0 @@

variable "region" {
  description = "Region"
  type        = string
}

variable "network_internal" {
  description = "Internal network"
}

variable "network_external" {
  description = "External network"
}

variable "instance_servergroup" {
  description = "Server Group"
  type        = string
  default     = ""
}

variable "instance_count" {
  description = "Instances in region"
  type        = number
}

variable "instance_flavor" {
  description = "Instance type"
  type        = string
}

variable "instance_image" {
  description = "Instance image"
  type        = string
}

variable "instance_tags" {
  description = "Instance tags"
  type        = list(string)
  default     = []
}

variable "instance_secgroups" {
  description = "Instance network security groups"
  type        = list(string)
  default     = []
}

variable "instance_params" {
  description = "Instance template parameters"
  type        = map(string)
}

variable "instance_ip_start" {
  description = "Instances in region"
  type        = number
  default     = 11
}
@@ -1,10 +0,0 @@

terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.49.0"
    }
  }
  required_version = ">= 1.2"
}
@@ -1,89 +0,0 @@

resource "openstack_networking_port_v2" "worker" {
  count          = var.instance_count
  region         = var.region
  name           = "${var.instance_name}-${lower(var.region)}-${count.index + 1}"
  network_id     = var.network_internal.network_id
  admin_state_up = true

  # port_security_enabled = len(var.instance_secgroups) > 0
  # security_group_ids    = var.instance_secgroups

  fixed_ip {
    subnet_id  = var.network_internal.subnet_id
    ip_address = cidrhost(var.network_internal.cidr, var.instance_ip_start + count.index)
  }
}

resource "openstack_networking_port_v2" "worker_public" {
  count              = length(try(var.network_external, {})) == 0 ? 0 : var.instance_count
  region             = var.region
  name               = "${var.instance_name}-${lower(var.region)}-${count.index + 1}"
  network_id         = var.network_external.id
  admin_state_up     = true
  security_group_ids = var.instance_secgroups

  dynamic "fixed_ip" {
    for_each = try([var.network_external.subnet], [])
    content {
      subnet_id = fixed_ip.value
    }
  }
}

locals {
  worker_labels = "topology.kubernetes.io/region=${var.region},project.io/node-pool=${var.instance_name}"
}

resource "openstack_compute_instance_v2" "worker" {
  count       = var.instance_count
  region      = var.region
  name        = "${var.instance_name}-${lower(var.region)}-${count.index + 1}"
  flavor_name = var.instance_flavor
  # tags      = var.instance_tags
  image_id    = var.instance_image

  scheduler_hints {
    group = var.instance_servergroup
  }

  stop_before_destroy = true

  user_data = templatefile("${path.module}/../../templates/worker.yaml.tpl",
    merge(var.instance_params, {
      name        = "${var.instance_name}-${lower(var.region)}-${count.index + 1}"
      labels      = local.worker_labels
      iface       = length(try(var.network_external, {})) == 0 ? "eth0" : "eth1"
      nodeSubnets = var.network_internal.cidr
    })
  )

  dynamic "network" {
    for_each = try([openstack_networking_port_v2.worker_public[count.index]], [])
    content {
      port = network.value.id
    }
  }
  network {
    port = openstack_networking_port_v2.worker[count.index].id
  }

  lifecycle {
    ignore_changes = [flavor_name, image_id, user_data]
  }
}

resource "local_file" "worker" {
  count = var.instance_count

  content = templatefile("${path.module}/../../templates/worker.yaml.tpl",
    merge(var.instance_params, {
      name        = "${var.instance_name}-${lower(var.region)}-${count.index + 1}"
      labels      = local.worker_labels
      iface       = length(try(var.network_external, {})) == 0 ? "eth0" : "eth1"
      nodeSubnets = var.network_internal.cidr
    })
  )
  filename        = "_cfgs/${var.instance_name}-${lower(var.region)}-${count.index + 1}.yaml"
  file_permission = "0600"
}
@@ -1,5 +0,0 @@

output "worker_endpoints" {
  description = "Kubernetes worker endpoint"
  value       = flatten([for ip in try(openstack_networking_port_v2.worker_public[*].all_fixed_ips, []) : ip])
}
@@ -1,64 +0,0 @@

variable "region" {
  description = "Region"
  type        = string
}

variable "network_internal" {
  description = "Internal network"
}

variable "network_external" {
  description = "External network"
  default     = {}
}

variable "instance_servergroup" {
  description = "Server Group"
  type        = string
  default     = ""
}

variable "instance_count" {
  description = "Instances in region"
  type        = number
}

variable "instance_name" {
  description = "Instance name prefix"
  type        = string
  default     = "worker"
}

variable "instance_flavor" {
  description = "Instance type"
  type        = string
}

variable "instance_image" {
  description = "Instance image"
  type        = string
}

variable "instance_tags" {
  description = "Instance tags"
  type        = list(string)
  default     = []
}

variable "instance_secgroups" {
  description = "Instance network security groups"
  type        = list(string)
  default     = []
}

variable "instance_params" {
  description = "Instance template parameters"
  type        = map(string)
}

variable "instance_ip_start" {
  description = "Instances in region"
  type        = number
  default     = 21
}
@@ -1,10 +0,0 @@

terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.49.0"
    }
  }
  required_version = ">= 1.2"
}
@@ -1,18 +1,14 @@

locals {
  lbv4 = "1.1.1.1"
resource "openstack_networking_port_v2" "vip" {
  for_each       = { for idx, name in local.regions : name => idx }
  region         = each.key
  name           = "controlplane-${lower(each.key)}-lb"
  network_id     = local.network_public[each.key].network_id
  admin_state_up = true

  port_security_enabled = false
  fixed_ip {
    subnet_id  = local.network_public[each.key].subnet_id
    ip_address = cidrhost(local.network_public[each.key].cidr, 5)
  }
}

# resource "openstack_networking_port_v2" "vip" {
#   for_each       = { for idx, name in local.regions : name => idx }
#   region         = each.key
#   name           = "controlplane-${lower(each.key)}-lb"
#   network_id     = local.network[each.key].id
#   admin_state_up = true
#   port_security_enabled = false

#   fixed_ip {
#     subnet_id  = local.network_public[each.key].id
#     ip_address = cidrhost(local.network_public[each.key].cidr, 10)
#   }
# }

@@ -1,34 +0,0 @@

data "openstack_networking_subnet_v2" "controlplane_public" {
  for_each   = { for idx, name in local.regions : name => idx }
  region     = each.key
  network_id = local.network_external[each.key].id
  ip_version = 6
}

# resource "openstack_networking_router_v2" "gw" {
#   count               = length(var.regions)
#   region              = element(var.regions, count.index)
#   name                = "private"
#   admin_state_up      = true
#   external_network_id = data.openstack_networking_network_v2.external[count.index].id
# }

# resource "openstack_networking_port_v2" "gw" {
#   count          = length(var.regions)
#   region         = element(var.regions, count.index)
#   name           = "gw"
#   network_id     = data.openstack_networking_network_v2.main[count.index].id
#   admin_state_up = "true"
#   fixed_ip {
#     subnet_id  = openstack_networking_subnet_v2.private[count.index].id
#     ip_address = cidrhost(openstack_networking_subnet_v2.private[count.index].cidr, 1)
#   }
# }

# resource "openstack_networking_router_interface_v2" "private" {
#   count     = length(var.regions)
#   region    = element(var.regions, count.index)
#   router_id = openstack_networking_router_v2.gw[count.index].id
#   port_id   = openstack_networking_port_v2.gw[count.index].id
# }
@@ -1,15 +1,15 @@

output "controlplane_endpoint" {
  description = "Kubernetes controlplane endpoint"
  value       = module.controlplane
  value       = one([for ip in local.ips : ip if length(split(".", ip)) > 1])
}

output "controlplane_endpoint_public" {
  description = "Kubernetes controlplane endpoint public"
  value       = try(local.endpoint[0], "127.0.0.1")
  value       = one([for ip in local.endpoint : ip if length(split(".", ip)) > 1])
}

output "web_endpoint" {
  description = "Kubernetes controlplane endpoint"
  value       = module.web
  description = "Kubernetes web endpoint"
  value       = local.web_endpoint
}

@@ -9,7 +9,7 @@ resource "openstack_compute_keypair_v2" "keypair" {
  for_each   = { for idx, name in var.regions : name => idx }
  region     = each.key
  name       = "Terraform"
  public_key = file("~/.ssh/id_rsa.pub")
  public_key = file("~/.ssh/terraform.pub")
}

data "openstack_images_image_v2" "debian" {

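Reading `~/.ssh/terraform.pub` assumes that key pair already exists on disk; an alternative sketch generates it in-config with the hashicorp/tls provider (the resource names here are illustrative, not a drop-in for this repo):

```hcl
resource "tls_private_key" "terraform" {
  algorithm = "ED25519" # RSA also works if older hosts need it
}

resource "openstack_compute_keypair_v2" "keypair_generated" {
  for_each   = { for idx, name in var.regions : name => idx }
  region     = each.key
  name       = "Terraform"
  public_key = tls_private_key.terraform.public_key_openssh
}
```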
@@ -66,6 +66,8 @@ resource "openstack_networking_port_v2" "router" {
  name           = "router-${lower(each.key)}-${openstack_networking_subnet_v2.private[each.key].name}"
  network_id     = local.network_id[each.key].id
  admin_state_up = "true"

  port_security_enabled = false
  fixed_ip {
    subnet_id  = openstack_networking_subnet_v2.private[each.key].id
    ip_address = cidrhost(openstack_networking_subnet_v2.private[each.key].cidr, try(var.capabilities[each.key].gateway, false) && data.openstack_networking_quota_v2.quota[each.key].router > 0 ? 2 : 1)

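The ternary in that `cidrhost` call decides where this port lands: regions that can run their own router reserve host 1 for it and put the port on host 2; everywhere else the port takes host 1 directly. A worked evaluation under an assumed CIDR:

```hcl
locals {
  example_private_cidr = "172.16.8.0/24"                         # hypothetical subnet
  gw_with_router       = cidrhost(local.example_private_cidr, 2) # => "172.16.8.2"
  gw_without_router    = cidrhost(local.example_private_cidr, 1) # => "172.16.8.1"
}
```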
@@ -129,6 +129,17 @@ resource "openstack_networking_secgroup_rule_v2" "controlplane_talos_admins" {
  remote_ip_prefix = var.whitelist_admins[0]
}

# resource "openstack_networking_secgroup_rule_v2" "controlplane_talos_admins_ipv6" {
#   for_each          = { for idx, name in var.regions : name => idx }
#   region            = each.key
#   security_group_id = openstack_networking_secgroup_v2.controlplane[each.key].id
#   direction         = "ingress"
#   ethertype         = "IPv6"
#   protocol          = "tcp"
#   port_range_min    = 50000
#   port_range_max    = 50000
# }

resource "openstack_networking_secgroup_rule_v2" "controlplane_etcd_ipv4" {
  for_each = { for idx, name in var.regions : name => idx }
  region   = each.key
@@ -208,6 +219,17 @@ resource "openstack_networking_secgroup_rule_v2" "web_https_v4" {
  port_range_max    = 443
}

resource "openstack_networking_secgroup_rule_v2" "web_https_v6" {
  for_each          = { for idx, name in var.regions : name => idx }
  region            = each.key
  security_group_id = openstack_networking_secgroup_v2.web[each.key].id
  direction         = "ingress"
  ethertype         = "IPv6"
  protocol          = "tcp"
  port_range_min    = 443
  port_range_max    = 443
}

###

resource "openstack_networking_secgroup_v2" "router" {

@@ -4,6 +4,10 @@ output "regions" {
  value = var.regions
}

output "peers" {
  value = { for idx, name in var.regions : name => openstack_networking_port_v2.router_external[name].all_fixed_ips if try(var.capabilities[name].peering, false) }
}

output "network" {
  value = { for zone, network in local.network_id : zone => {
    name = var.network_name

@@ -3,8 +3,8 @@ terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.49.0"
      version = "~> 1.52.1"
    }
  }
  required_version = ">= 1.2"
  required_version = ">= 1.5"
}

@@ -1,32 +1,14 @@
version: v1alpha1
debug: false
persist: true
machine:
  type: ${type}
  certSANs:
    - "${lbv4}"
    - "${ipv4}"
    - "${ipv6}"
    - "${ipv4_local}"
    - "${ipv4_local_vip}"
    - "${apiDomain}"
  features:
    kubernetesTalosAPIAccess:
      enabled: true
      allowedRoles:
        - os:reader
      allowedKubernetesNamespaces:
        - kube-system
  certSANs: ${format("%#v",certSANs)}
  kubelet:
    extraArgs:
      node-ip: "${ipv4_local}"
      rotate-server-certificates: true
      node-labels: "${labels}"
      rotate-server-certificates: true
    clusterDNS:
      - 169.254.2.53
      - ${cidrhost(split(",",serviceSubnets)[0], 10)}
    nodeIP:
      validSubnets: ${format("%#v",nodeSubnets)}
      validSubnets: ["${ipv4_local}/32"]
  network:
    hostname: "${name}"
    interfaces:
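Two template functions carry this hunk: `format("%#v", ...)` JSON-encodes a Terraform list, which is also valid YAML flow style, and `cidrhost(split(...))` derives the cluster DNS address from the first service subnet. A worked example, assuming the subnet values used elsewhere in this repo:

```hcl
locals {
  certSANs_rendered = format("%#v", ["172.16.16.5", "api.cluster.local"]) # => ["172.16.16.5","api.cluster.local"]
  serviceSubnets    = "10.200.0.0/22,fd40:10:200::/112"                   # assumed input
  clusterDNS        = cidrhost(split(",", local.serviceSubnets)[0], 10)   # => "10.200.0.10"
}
```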
@@ -40,36 +22,42 @@ machine:
      addresses:
        - 169.254.2.53/32
    extraHostEntries:
      - ip: ${ipv4_local_vip}
      - ip: 127.0.0.1
        aliases:
          - ${apiDomain}
  install:
    wipe: false
  sysctls:
    net.core.somaxconn: 65535
    net.core.netdev_max_backlog: 4096
  systemDiskEncryption:
    state:
      provider: luks2
      options:
        - no_read_workqueue
        - no_write_workqueue
      keys:
        - nodeID: {}
          slot: 0
    ephemeral:
      provider: luks2
      keys:
        - nodeID: {}
          slot: 0
      options:
        - no_read_workqueue
        - no_write_workqueue
      keys:
        - nodeID: {}
          slot: 0
  features:
    kubernetesTalosAPIAccess:
      enabled: true
      allowedRoles:
        - os:reader
      allowedKubernetesNamespaces:
        - kube-system
cluster:
  id: ${clusterID}
  secret: ${clusterSecret}
  adminKubeconfig:
    certLifetime: 16h0m0s
  controlPlane:
    endpoint: https://${apiDomain}:6443
  clusterName: ${clusterName}
  discovery:
    enabled: true
  network:
    dnsDomain: ${domain}
    podSubnets: ${format("%#v",split(",",podSubnets))}
@@ -77,38 +65,11 @@ cluster:
    cni:
      name: custom
      urls:
        - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/cilium-result.yaml
        - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/cilium-result.yaml
  proxy:
    disabled: true
  apiServer:
    certSANs:
      - "${lbv4}"
      - "${ipv4}"
      - "${ipv6}"
      - "${ipv4_local}"
      - "${ipv4_local_vip}"
      - "${apiDomain}"
    admissionControl:
      - name: PodSecurity
        configuration:
          apiVersion: pod-security.admission.config.k8s.io/v1alpha1
          defaults:
            audit: restricted
            audit-version: latest
            enforce: baseline
            enforce-version: latest
            warn: restricted
            warn-version: latest
          exemptions:
            namespaces:
              - kube-system
              - ingress-nginx
              - monitoring
              - local-path-storage
              - local-lvm
            runtimeClasses: []
            usernames: []
          kind: PodSecurityConfiguration
    certSANs: ${format("%#v",certSANs)}
  controllerManager:
    extraArgs:
      node-cidr-mask-size-ipv4: 24
@@ -116,9 +77,9 @@ cluster:
  scheduler: {}
  etcd:
    advertisedSubnets:
      - ${nodeSubnets[0]}
      - ${ipv4_local}/32
    listenSubnets:
      - ${nodeSubnets[0]}
      - ${ipv4_local}/32
    extraArgs:
      election-timeout: "5000"
      heartbeat-interval: "1000"
@@ -136,11 +97,12 @@ cluster:
  externalCloudProvider:
    enabled: true
    manifests:
      - https://raw.githubusercontent.com/siderolabs/talos-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/openstack-cloud-controller-manager.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/kubelet-serving-cert-approver.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/metrics-server.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/local-path-storage.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/coredns-local.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/ingress-ns.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/ingress-result.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/talos-cloud-controller-manager-result.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/openstack-cloud-controller-manager-result.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/openstack/deployments/openstack-cinder-csi-result.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/metrics-server-result.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-ns.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-result.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/coredns-local.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-ns.yaml
      - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-result.yaml

@@ -7,6 +7,7 @@ machine:
    ca:
      crt: ${caMachine}
  kubelet:
    defaultRuntimeSeccompProfileEnabled: true
    extraArgs:
      cloud-provider: external
      rotate-server-certificates: true
@@ -21,8 +22,7 @@ machine:
    interfaces:
      - interface: ${iface}
        dhcp: true
        routes:
${routes}
        routes: ${routes}
      - interface: dummy0
        addresses:
          - 169.254.2.53/32
@@ -30,11 +30,33 @@ machine:
      - ip: ${lbv4}
        aliases:
          - ${apiDomain}
    nameservers:
      - 1.1.1.1
      - 2606:4700:4700::1111
      - 2001:4860:4860::8888
  time:
    servers:
      - 2.europe.pool.ntp.org
      - time.cloudflare.com
  install:
    wipe: false
  sysctls:
    net.core.somaxconn: 65535
    net.core.netdev_max_backlog: 4096
  systemDiskEncryption:
    state:
      provider: luks2
      keys:
        - nodeID: {}
          slot: 0
    ephemeral:
      provider: luks2
      keys:
        - nodeID: {}
          slot: 0
      options:
        - no_read_workqueue
        - no_write_workqueue
cluster:
  id: ${clusterID}
  secret: ${clusterSecret}

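The collapsed `routes: ${routes}` form depends on the caller embedding newlines and the right YAML indent in the string itself; a sketch of that construction, with assumed subnets (the exact spacing in the format string must match the template's nesting depth):

```hcl
locals {
  subnets_example = ["172.16.16.0/24", "172.16.24.0/24"] # hypothetical zone subnets
  # The leading "\n" pushes the first entry onto its own line under "routes:".
  routes_example = "\n${join("\n", formatlist(" - network: %s", local.subnets_example))}"
}
```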
@@ -77,10 +77,10 @@ variable "instances" {
  type = map(any)
  default = {
    "REGION" = {
      web_count            = 0,
      web_instance_type    = "d2-2",
      worker_count         = 0,
      worker_instance_type = "d2-2",
      web_count    = 0,
      web_type     = "d2-2",
      worker_count = 0,
      worker_type  = "d2-2",
    },
  }
}

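Renamed keys mean existing tfvars need updating to match; a hypothetical per-region value using the new `web_type`/`worker_type` names (region name and sizes are examples only):

```hcl
# terraform.tfvars sketch
instances = {
  "GRA7" = {
    web_count    = 2,
    web_type     = "d2-4",
    worker_count = 1,
    worker_type  = "d2-8",
  },
}
```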
@@ -3,8 +3,8 @@ terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.49.0"
      version = "~> 1.52.1"
    }
  }
  required_version = ">= 1.2"
  required_version = ">= 1.5"
}