update image preparation

This commit is contained in:
Serge Logvinov
2023-05-07 14:41:28 +03:00
parent 93d921c1c1
commit 65be45f298
20 changed files with 300 additions and 1006 deletions

View File

@@ -31,15 +31,9 @@ create-controlplane-bootstrap:
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.48 bootstrap
create-controlplane: ## Bootstrap first controlplane node
terraform apply -auto-approve -target=local_file.worker_patch
terraform apply -auto-approve -target=null_resource.controlplane
create-kubeconfig: ## Prepare kubeconfig
talosctl --talosconfig _cfgs/talosconfig --nodes 172.16.0.48 kubeconfig .
kubectl --kubeconfig=kubeconfig config set clusters.${CLUSTERNAME}.server https://${ENDPOINT}:6443
kubectl --kubeconfig=kubeconfig config set-context --current --namespace=kube-system
create-deployments:
helm template --namespace=kube-system --version=1.12.8 -f deployments/cilium.yaml cilium \
cilium/cilium > deployments/cilium-result.yaml
helm template --namespace=ingress-nginx --version=4.6.0 -f deployments/ingress.yaml ingress-nginx \
ingress-nginx/ingress-nginx > deployments/ingress-result.yaml
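The `create-deployments` target renders charts from the `cilium` and `ingress-nginx` Helm repositories; a minimal sketch of registering them first, assuming the standard upstream repository URLs:
```shell
# one-time setup before running create-deployments
helm repo add cilium https://helm.cilium.io
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
```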

View File

@@ -1,5 +1,7 @@
# Proxmox
It was tested on Proxmox version 7.4-3
## Agenda
* create a VM config file `/etc/pve/qemu-server/VMID.conf`
@@ -8,6 +10,26 @@
* upload user-data (talos machine config) to the Proxmox host
* upload meta-data to the Proxmox host
## VM template
First we need to upload the Talos OS image to the Proxmox host machine.
If you do not have shared storage, you need to upload the image to each Proxmox host.
Follow this [README](images/README.md) to do so.
## Init
Create the Proxmox role and account.
These credentials will be used by the Proxmox CCM and CSI.
```shell
cd init
terraform init -upgrade
terraform apply
```
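The connection variables for this module are declared in `proxmox/init/variables.tf` (added later in this commit); a minimal sketch of passing them on the command line, with hypothetical values:
```shell
# hypothetical values; adjust to your Proxmox environment
terraform apply \
  -var proxmox_host=192.168.1.1 \
  -var proxmox_nodename=pve1 \
  -var proxmox_token_id='root@pam' \
  -var proxmox_token_secret='your-password'
```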
## Bootstrap cluster
Resulting VM config:
```yaml

View File

@@ -8,6 +8,6 @@ resource "local_file" "worker_patch" {
})
)
filename = "${path.module}/templates/worker.patch.yaml.tpl"
filename = "${path.module}/templates/worker.patch.yaml"
file_permission = "0600"
}

View File

@@ -1,153 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns-local
namespace: kube-system
data:
empty.db: |
@ 60 IN SOA localnet. root.localnet. (
1 ; serial
60 ; refresh
60 ; retry
60 ; expiry
60 ) ; minimum
;
@ IN NS localnet.
hosts: |
# static hosts
169.254.2.53 dns.local
Corefile.local: |
(empty) {
file /etc/coredns/empty.db
}
.:53 {
errors
bind 169.254.2.53
health 127.0.0.1:8091 {
lameduck 5s
}
hosts /etc/coredns/hosts {
reload 60s
fallthrough
}
kubernetes cluster.local in-addr.arpa ip6.arpa {
endpoint https://api.cluster.local:6443
kubeconfig /etc/coredns/kubeconfig.conf coredns
pods insecure
ttl 60
}
prometheus :9153
forward . /etc/resolv.conf {
policy sequential
expire 30s
}
cache 300
loop
reload
loadbalance
}
kubeconfig.conf: |-
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
server: https://api.cluster.local:6443
name: default
contexts:
- context:
cluster: default
namespace: kube-system
user: coredns
name: coredns
current-context: coredns
users:
- name: coredns
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: coredns-local
namespace: kube-system
labels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
spec:
updateStrategy:
type: RollingUpdate
minReadySeconds: 15
selector:
matchLabels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
template:
metadata:
labels:
k8s-app: kube-dns-local
kubernetes.io/name: CoreDNS
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9153"
spec:
priorityClassName: system-node-critical
serviceAccount: coredns
serviceAccountName: coredns
enableServiceLinks: false
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
hostNetwork: true
containers:
- name: coredns
image: coredns/coredns:1.9.4
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 50m
memory: 64Mi
args: [ "-conf", "/etc/coredns/Corefile.local" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 8091
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns-local

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/enforce-version: latest
pod-security.kubernetes.io/audit: baseline
pod-security.kubernetes.io/audit-version: latest
pod-security.kubernetes.io/warn: baseline
pod-security.kubernetes.io/warn-version: latest

View File

@@ -1,467 +0,0 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: "true"
client-body-timeout: "30"
client-header-timeout: "30"
enable-access-log-for-default-backend: "true"
error-log-level: "error"
hsts: "true"
hsts-include-subdomains: "true"
hsts-max-age: "31536000"
hsts-preload: "true"
http-redirect-code: "301"
limit-req-status-code: "429"
log-format-escape-json: "true"
log-format-upstream: "{\"ip\":\"$remote_addr\", \"ssl\":\"$ssl_protocol\", \"method\":\"$request_method\", \"proto\":\"$scheme\", \"host\":\"$host\", \"uri\":\"$request_uri\", \"status\":$status, \"size\":$bytes_sent, \"agent\":\"$http_user_agent\", \"referer\":\"$http_referer\", \"namespace\":\"$namespace\"}"
proxy-connect-timeout: "10"
proxy-headers-hash-bucket-size: "128"
proxy-hide-headers: "strict-transport-security"
proxy-read-timeout: "60"
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14"
proxy-send-timeout: "60"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
server-tokens: "false"
ssl-protocols: "TLSv1.3"
upstream-keepalive-connections: "32"
use-forwarded-headers: "true"
use-geoip: "false"
use-geoip2: "false"
use-gzip: "true"
worker-cpu-affinity: "auto"
worker-processes: "auto"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- ingress-nginx-leader
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "ingress-nginx"
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: ClusterIP
clusterIP: None
ipFamilyPolicy: RequireDualStack
ipFamilies:
- IPv4
- IPv6
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 2
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 15
template:
metadata:
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: "registry.k8s.io/ingress-nginx/controller:v1.7.0@sha256:7612338342a1e7b8090bef78f2a04fffcadd548ccaabe8a47bf7758ff549a5f7"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.6.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.7.0"
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx

View File

@@ -1,116 +0,0 @@
controller:
kind: DaemonSet
hostNetwork: true
hostPort:
enabled: false
ports:
http: 80
https: 443
dnsPolicy: ClusterFirstWithHostNet
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
publishService:
enabled: false
config:
worker-processes: "auto"
worker-cpu-affinity: "auto"
error-log-level: "error"
server-tokens: "false"
http-redirect-code: "301"
use-gzip: "true"
use-geoip: "false"
use-geoip2: "false"
use-forwarded-headers: "true"
# curl https://www.cloudflare.com/ips-v4 2>/dev/null | tr '\n' ','
proxy-real-ip-cidr: "173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,172.64.0.0/13,131.0.72.0/22,104.16.0.0/13,104.24.0.0/14"
enable-access-log-for-default-backend: "true"
log-format-escape-json: "true"
log-format-upstream: '{"ip":"$remote_addr", "ssl":"$ssl_protocol", "method":"$request_method", "proto":"$scheme", "host":"$host", "uri":"$request_uri", "status":$status, "size":$bytes_sent, "agent":"$http_user_agent", "referer":"$http_referer", "namespace":"$namespace"}'
upstream-keepalive-connections: "32"
proxy-connect-timeout: "10"
proxy-read-timeout: "60"
proxy-send-timeout: "60"
ssl-protocols: "TLSv1.3"
hsts: "true"
hsts-max-age: "31536000"
hsts-include-subdomains: "true"
hsts-preload: "true"
proxy-hide-headers: "strict-transport-security"
proxy-headers-hash-bucket-size: "128"
server-name-hash-bucket-size: "64"
server-name-hash-max-size: "512"
limit-req-status-code: "429"
client-header-timeout: "30"
client-body-timeout: "30"
minReadySeconds: 15
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "10254"
extraEnvs:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 30
readinessProbe:
periodSeconds: 30
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 128Mi
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: project.io/node-pool
operator: In
values:
- web
service:
enabled: true
type: ClusterIP
clusterIP: None
ipFamilyPolicy: "RequireDualStack"
ipFamilies:
- IPv4
- IPv6
admissionWebhooks:
enabled: false
metrics:
enabled: false
revisionHistoryLimit: 2
defaultBackend:
enabled: false

View File

@@ -1,231 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: kubelet-serving-cert-approver
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: certificates:kubelet-serving-cert-approver
rules:
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- get
- list
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests/approval
verbs:
- update
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- certificates.k8s.io
resourceNames:
- kubernetes.io/kubelet-serving
resources:
- signers
verbs:
- approve
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: events:kubelet-serving-cert-approver
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: psp:kubelet-serving-cert-approver
rules:
- apiGroups:
- policy
resourceNames:
- kubelet-serving-cert-approver
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: events:kubelet-serving-cert-approver
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: events:kubelet-serving-cert-approver
subjects:
- kind: ServiceAccount
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: psp:kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: psp:kubelet-serving-cert-approver
subjects:
- kind: ServiceAccount
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: kubelet-serving-cert-approver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: certificates:kubelet-serving-cert-approver
subjects:
- kind: ServiceAccount
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
spec:
ports:
- name: metrics
port: 9090
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
name: kubelet-serving-cert-approver
namespace: kubelet-serving-cert-approver
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
template:
metadata:
labels:
app.kubernetes.io/instance: kubelet-serving-cert-approver
app.kubernetes.io/name: kubelet-serving-cert-approver
spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
containers:
- args:
- serve
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: ghcr.io/alex1989hu/kubelet-serving-cert-approver:main
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: health
initialDelaySeconds: 6
name: cert-approver
ports:
- containerPort: 8080
name: health
- containerPort: 9090
name: metrics
readinessProbe:
httpGet:
path: /readyz
port: health
initialDelaySeconds: 3
resources:
limits:
cpu: 250m
memory: 32Mi
requests:
cpu: 10m
memory: 16Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsUser: 65534
serviceAccountName: kubelet-serving-cert-approver

6
proxmox/images/Makefile Normal file
View File

@@ -0,0 +1,6 @@
init:
packer init -upgrade .
release:
packer build -only=release.proxmox.talos .

54
proxmox/images/README.md Normal file
View File

@@ -0,0 +1,54 @@
# Upload Talos image
We will use the `nocloud` image.
## Method 1: packer
```shell
make init
make release
```
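These targets expect the Proxmox connection variables declared in the Packer variables file added in this commit; a minimal sketch of supplying them through an auto-loaded vars file, with hypothetical values:
```shell
# hypothetical values; Packer auto-loads *.auto.pkrvars.hcl files from the working directory
cat > local.auto.pkrvars.hcl <<'EOF'
proxmox_host         = "192.168.1.1"
proxmox_username     = "root@pam!packer"
proxmox_token        = "00000000-0000-0000-0000-000000000000"
proxmox_nodename     = "pve1"
proxmox_storage      = "local-lvm"
proxmox_storage_type = "lvm"
EOF
```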
## Method 2: manual
Create the VM manually; example config:
```yaml
agent: 0
boot: order=scsi0;ide2;net0
cores: 1
cpu: host
kvm: 1
balloon: 0
memory: 3072
name: talos
net0: virtio=...
onboot: 0
ostype: l26
ide2: cdrom,media=cdrom
scsi0: local-lvm:vm-106-disk-0,size=32G
scsihw: virtio-scsi-single
serial0: socket
smbios1: uuid=...
numa: 0
sockets: 1
template: 1
```
Find the name of the system disk.
In this example it is `local-lvm:vm-106-disk-0`, i.e. the LVM volume `vm-106-disk-0`.
Copy the Talos system disk to this volume (the `/dev/mapper/...` path depends on your volume group name, `vg0` here):
```shell
cd /tmp
wget https://github.com/siderolabs/talos/releases/download/v1.4.1/nocloud-amd64.raw.xz
xz -d -c nocloud-amd64.raw.xz | dd of=/dev/mapper/vg0-vm--106--disk--0
```
Then convert the VM to a template.
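On the Proxmox host this can be done with `qm` (VMID 106 matches the example config above):
```shell
# convert VM 106 into a template
qm template 106
```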
# Resources
* https://developer.hashicorp.com/packer/plugins/builders/proxmox/iso
* https://wiki.archlinux.org/title/Arch_Linux_on_a_VPS

View File

@@ -0,0 +1,80 @@
packer {
required_plugins {
proxmox = {
version = ">= 1.1.3"
source = "github.com/hashicorp/proxmox"
}
}
}
source "proxmox" "talos" {
proxmox_url = "https://${var.proxmox_host}:8006/api2/json"
username = var.proxmox_username
token = var.proxmox_token
node = var.proxmox_nodename
insecure_skip_tls_verify = true
iso_file = "local:iso/archlinux-2023.05.03-x86_64.iso"
# iso_url = "https://mirror.rackspace.com/archlinux/iso/2023.05.03/archlinux-2023.05.03-x86_64.iso"
# iso_checksum = "sha1:3ae7c83eca8bd698b4e54c49d43e8de5dc8a4456"
# iso_storage_pool = "local"
unmount_iso = true
network_adapters {
bridge = "vmbr0"
model = "virtio"
firewall = true
}
network_adapters {
bridge = "vmbr1"
model = "virtio"
}
scsi_controller = "virtio-scsi-single"
disks {
type = "scsi"
storage_pool = var.proxmox_storage
format = "raw"
disk_size = "5G"
io_thread = "true"
cache_mode = "writethrough"
}
cpu_type = "host"
memory = 3072
# vga {
# type = "serial0"
# }
serials = ["socket"]
ssh_username = "root"
ssh_password = "packer"
ssh_timeout = "15m"
qemu_agent = true
# ssh_bastion_host = var.proxmox_host
# ssh_bastion_username = "root"
# ssh_bastion_agent_auth = true
template_name = "talos"
template_description = "Talos system disk, version ${var.talos_version}"
boot_wait = "15s"
boot_command = [
"<enter><wait1m>",
"passwd<enter><wait>packer<enter><wait>packer<enter>"
]
}
build {
name = "release"
sources = ["source.proxmox.talos"]
provisioner "shell" {
inline = [
"curl -L ${local.image} -o /tmp/talos.raw.xz",
"xz -d -c /tmp/talos.raw.xz | dd of=/dev/sda && sync",
]
}
}

View File

@@ -0,0 +1,33 @@
variable "proxmox_host" {
type = string
}
variable "proxmox_username" {
type = string
}
variable "proxmox_token" {
type = string
}
variable "proxmox_nodename" {
type = string
}
variable "proxmox_storage" {
type = string
}
variable "proxmox_storage_type" {
type = string
}
variable "talos_version" {
type = string
default = "v1.4.1"
}
locals {
image = "https://github.com/talos-systems/talos/releases/download/${var.talos_version}/nocloud-amd64.raw.xz"
}

8
proxmox/init/auth.tf Normal file
View File

@@ -0,0 +1,8 @@
provider "proxmox" {
endpoint = "https://${var.proxmox_host}:8006/"
insecure = true
username = var.proxmox_token_id
password = var.proxmox_token_secret
}

20
proxmox/init/role.tf Normal file
View File

@@ -0,0 +1,20 @@
resource "proxmox_virtual_environment_role" "ccm" {
role_id = "CCM"
privileges = [
"VM.Audit",
]
}
resource "proxmox_virtual_environment_role" "csi" {
role_id = "CSI"
privileges = [
"VM.Audit",
"VM.Config.Disk",
"Datastore.Allocate",
"Datastore.AllocateSpace",
"Datastore.Audit",
]
}

23
proxmox/init/users.tf Normal file
View File

@@ -0,0 +1,23 @@
resource "random_password" "kubernetes" {
length = 16
override_special = "_%@"
special = true
}
resource "proxmox_virtual_environment_user" "kubernetes" {
acl {
path = "/"
propagate = true
role_id = proxmox_virtual_environment_role.ccm.role_id
}
acl {
path = "/"
propagate = true
role_id = proxmox_virtual_environment_role.csi.role_id
}
comment = "Kubernetes"
password = random_password.kubernetes.result
user_id = "kubernetes@pve"
}

21
proxmox/init/variables.tf Normal file
View File

@@ -0,0 +1,21 @@
variable "proxmox_host" {
description = "Proxmox host"
type = string
default = "192.168.1.1"
}
variable "proxmox_nodename" {
description = "Proxmox node name"
type = string
}
variable "proxmox_token_id" {
description = "Proxmox token id"
type = string
}
variable "proxmox_token_secret" {
description = "Proxmox token secret"
type = string
}

9
proxmox/init/versions.tf Normal file
View File

@@ -0,0 +1,9 @@
terraform {
required_providers {
proxmox = {
source = "bpg/proxmox"
version = "0.18.2"
}
}
required_version = ">= 1.0"
}

View File

@@ -123,10 +123,10 @@ resource "local_file" "controlplane" {
file_permission = "0600"
}
# resource "null_resource" "controlplane" {
# for_each = local.controlplanes
# provisioner "local-exec" {
# command = "sleep 60 && talosctl apply-config --insecure --nodes ${split("/", each.value.ipv4)[0]} --config-patch @_cfgs/${each.value.name}.yaml --file _cfgs/controlplane.yaml"
# }
# depends_on = [proxmox_vm_qemu.controlplane, local_file.controlplane]
# }
resource "null_resource" "controlplane" {
for_each = local.controlplanes
provisioner "local-exec" {
command = "sleep 60 && talosctl apply-config --insecure --nodes ${split("/", each.value.ipv4)[0]} --config-patch @_cfgs/${each.value.name}.yaml --file _cfgs/controlplane.yaml"
}
depends_on = [proxmox_vm_qemu.controlplane, local_file.controlplane]
}

View File

@@ -59,7 +59,7 @@ cluster:
cni:
name: custom
urls:
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/cilium-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/cilium-result.yaml
proxy:
disabled: true
controllerManager:
@@ -74,10 +74,12 @@ cluster:
externalCloudProvider:
enabled: true
manifests:
- https://raw.githubusercontent.com/siderolabs/talos-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/kubelet-serving-cert-approver.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/metrics-server.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/local-path-storage.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/ingress-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/proxmox/deployments/ingress-result.yaml
# - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/talos-cloud-controller-manager-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/proxmox-cloud-controller-manager/main/docs/deploy/cloud-controller-manager.yml
# - https://raw.githubusercontent.com/sergelogvinov/proxmox-csi-plugin/main/docs/deploy/proxmox-csi-plugin-talos.yml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/metrics-server-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/local-path-storage-result.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/coredns-local.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-ns.yaml
- https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/ingress-result.yaml

View File

@@ -6,18 +6,18 @@ machine:
node-labels: "project.io/node-pool=worker"
clusterDNS:
- 169.254.2.53
- 10.200.0.10
- ${cidrhost(split(",",serviceSubnets)[0], 10)}
nodeIP:
validSubnets: ["172.16.0.0/24"]
validSubnets: ${format("%#v",split(",",nodeSubnets))}
network:
interfaces:
- interface: dummy0
addresses:
- 169.254.2.53/32
extraHostEntries:
- ip: 172.16.0.10
- ip: ${lbv4}
aliases:
- api.cluster.local
- ${apiDomain}
sysctls:
net.core.somaxconn: 65535
net.core.netdev_max_backlog: 4096
@@ -40,6 +40,6 @@ machine:
slot: 0
cluster:
controlPlane:
endpoint: https://api.cluster.local:6443
endpoint: https://${apiDomain}:6443
proxy:
disabled: true
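For reference, the template expressions introduced above resolve roughly as follows (hypothetical subnet values, evaluated e.g. with `terraform console`):
```shell
# hypothetical inputs matching the variable names used in the template
echo 'cidrhost(split(",", "10.200.0.0/22")[0], 10)' | terraform console   # renders 10.200.0.10
echo 'format("%#v", split(",", "172.16.0.0/24"))' | terraform console     # renders ["172.16.0.0/24"]
```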