Mirror of https://github.com/outbackdingo/cozystack.git
Synced 2026-01-28 18:18:41 +00:00

Compare commits: tests-w-re ... hcloud (1 commit, ea2e369e24)
@@ -2,18 +2,6 @@

@test "Create DB ClickHouse" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources=' resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
@@ -39,13 +27,15 @@ spec:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
$resources
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait --timeout=40s hr clickhouse-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s clickhouses $name --for=condition=ready
kubectl -n tenant-test wait --timeout=120s sts chi-clickhouse-$name-clickhouse-0-0 --for=jsonpath='{.status.replicas}'=1
timeout 210 sh -ec "until kubectl -n tenant-test wait svc chendpoint-clickhouse-$name --for=jsonpath='{.spec.ports[0].port}'=8123; do sleep 10; done"
kubectl -n tenant-test delete clickhouse.apps.cozystack.io $name
kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
}
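
All of these bats tests build the optional `resources` block the same way: a shell variable is filled conditionally and spliced into an unquoted `kubectl apply` heredoc. A minimal standalone sketch of that pattern (the `demo` name and the two-space indentation are illustrative, not taken from the diff):

```bash
#!/usr/bin/env bash
# Conditional-resources pattern used throughout these tests: $resources expands
# inside the unquoted heredoc before kubectl ever parses the manifest.
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
  resources:
    cpu: 500m
    memory: 768Mi
EOF
)
else
resources='  resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
  name: demo
  namespace: tenant-test
spec:
$resources
  resourcesPreset: "nano"
EOF
```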

@@ -2,18 +2,6 @@

@test "Create Kafka" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources='resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kafka
@@ -26,13 +14,13 @@ spec:
size: 10Gi
replicas: 2
storageClass: ""
$resources
resources: {}
resourcesPreset: "nano"
zookeeper:
size: 5Gi
replicas: 2
storageClass: ""
$resources
resources:
resourcesPreset: "nano"
topics:
- name: testResults
@@ -50,9 +38,14 @@ spec:
replicas: 2
EOF
sleep 5
kubectl -n tenant-test wait --timeout=30s hr kafka-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=1m kafkas $name --for=condition=ready
kubectl -n tenant-test wait --timeout=50s pvc data-kafka-$name-zookeeper-0 --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait --timeout=40s svc kafka-$name-zookeeper-client --for=jsonpath='{.spec.ports[0].port}'=2181
kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete kafka.apps.cozystack.io $name
kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
}
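
The `timeout N sh -ec "until …; do sleep …; done"` construct that recurs in these tests exists because `kubectl wait` fails immediately when the object has not been created yet. A hedged helper capturing the idiom (the function name and the 5-second interval are my own, not from the diff):

```bash
# Poll until an object exists, then hand off to kubectl wait for its condition.
wait_for_object() {  # usage: wait_for_object <seconds> <kind> <name>
  local secs="$1" kind="$2" name="$3"
  timeout "$secs" sh -ec "until kubectl -n tenant-test get $kind $name >/dev/null 2>&1; do sleep 5; done"
}

# Block up to 60s until the ZooKeeper PVC appears, then wait for Bound.
wait_for_object 60 pvc data-kafka-test-zookeeper-0
kubectl -n tenant-test wait pvc data-kafka-test-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
```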

@@ -1,17 +1,16 @@

#!/usr/bin/env bats

@test "Create a tenant Kubernetes control plane" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
name: $name
name: test
namespace: tenant-test
spec:
addons:
certManager:
enabled: true
enabled: false
valuesOverride: {}
cilium:
valuesOverride: {}
@@ -25,12 +24,10 @@ spec:
valuesOverride: {}
ingressNginx:
enabled: true
hosts:
- example.org
exposeMethod: Proxied
hosts: []
valuesOverride: {}
monitoringAgents:
enabled: true
enabled: false
valuesOverride: {}
verticalPodAutoscaler:
valuesOverride: {}
@@ -64,39 +61,12 @@ spec:
- ingress-nginx
storageClass: replicated
EOF
sleep 10
kubectl wait --timeout=20s namespace tenant-test --for=jsonpath='{.status.phase}'=Active
kubectl -n tenant-test wait --timeout=10s kamajicontrolplane kubernetes-$name --for=jsonpath='{.status.conditions[0].status}'=True
kubectl -n tenant-test wait --timeout=4m kamajicontrolplane kubernetes-$name --for=condition=TenantControlPlaneCreated
kubectl -n tenant-test wait --timeout=210s tcp kubernetes-$name --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
kubectl -n tenant-test wait --timeout=4m deploy kubernetes-$name kubernetes-$name-cluster-autoscaler kubernetes-$name-kccm kubernetes-$name-kcsi-controller --for=condition=available
kubectl -n tenant-test wait --timeout=1m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.replicas}'=2
kubectl -n tenant-test wait --timeout=10m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
# ingress / load balancer
kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-monitoring-agents --for=condition=ready
kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-ingress-nginx --for=condition=ready
kubectl -n tenant-test get secret kubernetes-$name-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "admin.conf" | base64decode) }}' > admin.conf
KUBECONFIG=admin.conf kubectl -n cozy-ingress-nginx wait --timeout=3m deploy ingress-nginx-defaultbackend --for=jsonpath='{.status.conditions[0].status}'=True
KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=3m deploy cozy-monitoring-agents-metrics-server --for=jsonpath='{.status.conditions[0].status}'=True
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
}

@test "Create a PVC in tenant Kubernetes" {
name='test'
KUBECONFIG=admin.conf kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-$name
namespace: cozy-monitoring
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
EOF
sleep 10
KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=20s pvc pvc-$name --for=jsonpath='{.status.phase}'=Bound
KUBECONFIG=admin.conf kubectl -n cozy-monitoring delete pvc pvc-$name
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $name
}

@@ -2,18 +2,6 @@

@test "Create DB MySQL" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 3000m
memory: 3Gi
EOF
)
else
resources=' resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
@@ -43,15 +31,16 @@ spec:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
$resources
resources: {}
resourcesPreset: "nano"
EOF
sleep 10
kubectl -n tenant-test wait --timeout=30s hr mysql-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s mysqls $name --for=condition=ready
kubectl -n tenant-test wait --timeout=110s sts mysql-$name --for=jsonpath='{.status.replicas}'=2
sleep 60
kubectl -n tenant-test wait --timeout=60s deploy mysql-$name-metrics --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test wait --timeout=100s svc mysql-$name --for=jsonpath='{.spec.ports[0].port}'=3306
sleep 5
kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}

@@ -2,18 +2,6 @@

@test "Create DB PostgreSQL" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources=' resources: {}'
fi
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
@@ -48,14 +36,19 @@ spec:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
$resources
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait --timeout=200s hr postgres-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s postgreses $name --for=condition=ready
kubectl -n tenant-test wait --timeout=50s job.batch postgres-$name-init-job --for=condition=Complete
kubectl -n tenant-test wait --timeout=40s svc postgres-$name-r --for=jsonpath='{.spec.ports[0].port}'=5432
kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
# for some reason it takes longer for the read-only endpoint to be ready
#timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}

@@ -2,18 +2,6 @@

@test "Create Redis" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources='resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
@@ -26,15 +14,13 @@ spec:
replicas: 2
storageClass: ""
authEnabled: true
$resources
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait --timeout=20s hr redis-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s redis.apps.cozystack.io $name --for=condition=ready
kubectl -n tenant-test wait --timeout=50s pvc redisfailover-persistent-data-rfr-redis-$name-0 --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait --timeout=90s sts rfr-redis-$name --for=jsonpath='{.status.replicas}'=2
sleep 45
kubectl -n tenant-test wait --timeout=45s deploy rfs-redis-$name --for=condition=available
kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
kubectl -n tenant-test delete redis.apps.cozystack.io $name
}

@@ -2,14 +2,6 @@

@test "Create a Virtual Machine" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
cores="1000m"
memory="1Gi"
else
cores="2000m"
memory="2Gi"
fi
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
@@ -17,12 +9,6 @@ metadata:
name: $name
namespace: tenant-test
spec:
domain:
cpu:
cores: "$cores"
resources:
requests:
memory: "$memory"
external: false
externalMethod: PortList
externalPorts:
@@ -34,6 +20,9 @@ spec:
storage: 5Gi
storageClass: replicated
gpus: []
resources:
cpu: ""
memory: ""
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
@@ -48,12 +37,11 @@ spec:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInitSeed: ""
EOF
sleep 10
kubectl -n tenant-test wait --timeout=10s hr virtual-machine-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s virtualmachines $name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s pvc virtual-machine-$name --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait --timeout=150s dv virtual-machine-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=100s vm virtual-machine-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=150s vmi virtual-machine-$name --for=jsonpath='{status.phase}'=Running
sleep 5
kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}

@@ -17,37 +17,21 @@

storageClass: replicated
EOF
sleep 5
kubectl -n tenant-test wait --timeout=5s hr vm-disk-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s vmdisks $name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s pvc vm-disk-$name --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait --timeout=150s dv vm-disk-$name --for=condition=ready
kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
diskName='test'
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
cores="1000m"
memory="1Gi"
else
cores="2000m"
memory="2Gi"
fi
kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
kubectl create -f - <<EOF
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
name: $name
namespace: tenant-test
spec:
domain:
cpu:
cores: "$cores"
resources:
requests:
memory: "$memory"
external: false
externalMethod: PortList
externalPorts:
@@ -58,6 +42,9 @@ spec:
disks:
- name: $diskName
gpus: []
resources:
cpu: ""
memory: ""
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
@@ -73,10 +60,9 @@ spec:
cloudInitSeed: ""
EOF
sleep 5
kubectl -n tenant-test wait --timeout=5s hr vm-instance-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s vminstances $name --for=condition=ready
kubectl -n tenant-test wait --timeout=20s vm vm-instance-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=40s vmi vm-instance-$name --for=jsonpath='{status.phase}'=Running
timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}
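
The `kubectl get … || kubectl create` pair at the top of this test is a create-if-absent guard: on a rerun the `get` succeeds and short-circuits the `create`, so the test does not fail with AlreadyExists. A trimmed sketch (the spec is elided; a real VMInstance needs the fields shown in the diff):

```bash
# Create only when the object does not already exist.
kubectl -n tenant-test get vminstances.apps.cozystack.io test >/dev/null 2>&1 ||
kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: test
  namespace: tenant-test
# spec elided for brevity; see the manifest above
EOF
```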

@@ -75,6 +75,6 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
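
Since the preset is ignored once `resources` is set, the two values interact as in this sketch (a values fragment in the charts' style; the sizes come from the table above):

```yaml
# Preset in effect: with resources empty, pods get the "small" sizing (1 CPU / 512Mi).
resources: {}
resourcesPreset: "small"
---
# Preset ignored: explicit resources take precedence, per the note above the table.
resources:
  cpu: 2
  memory: 2Gi
resourcesPreset: "small"
```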

@@ -62,6 +62,6 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -100,7 +100,7 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -46,7 +46,7 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -16,7 +16,7 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.25.2
version: 0.25.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -64,8 +64,6 @@ image-kubevirt-csi-driver:

--load=$(LOAD)
echo "$(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-csi-driver.json -o json -r)" \
> images/kubevirt-csi-driver.tag
IMAGE=$$(cat images/kubevirt-csi-driver.tag) \
yq -i '.csiDriver.image = strenv(IMAGE)' ../../system/kubevirt-csi-node/values.yaml
rm -f images/kubevirt-csi-driver.json

@@ -146,7 +146,7 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.1@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.0@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.1@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.0@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.0@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036

@@ -13,17 +13,11 @@ rules:

resources: ["datavolumes"]
verbs: ["get", "create", "delete"]
- apiGroups: ["kubevirt.io"]
resources: ["virtualmachineinstances", "virtualmachines"]
resources: ["virtualmachineinstances"]
verbs: ["list", "get"]
- apiGroups: ["subresources.kubevirt.io"]
resources: ["virtualmachines/addvolume", "virtualmachines/removevolume"]
resources: ["virtualmachineinstances/addvolume", "virtualmachineinstances/removevolume"]
verbs: ["update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding

@@ -40,7 +40,6 @@ spec:

{{ .Release.Name }}-fluxcd-operator
{{ .Release.Name }}-fluxcd
{{ .Release.Name }}-gpu-operator
{{ .Release.Name }}-velero
-p '{"spec": {"suspend": true}}'
--type=merge --field-manager=flux-client-side-apply || true
---
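
Assembled, the template fragment above amounts to a single `kubectl patch` that suspends the listed Flux HelmReleases before migration. A hedged reconstruction — the `cozystack` release name and the `cozy-system` namespace are assumptions for illustration, not taken from the diff:

```bash
# Suspend the HelmReleases so Flux stops reconciling them; tolerate absence.
kubectl -n cozy-system patch hr \
  cozystack-fluxcd-operator cozystack-fluxcd cozystack-gpu-operator cozystack-velero \
  -p '{"spec": {"suspend": true}}' \
  --type=merge --field-manager=flux-client-side-apply || true
```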

@@ -80,8 +79,6 @@ rules:

- {{ .Release.Name }}-fluxcd-operator
- {{ .Release.Name }}-fluxcd
- {{ .Release.Name }}-gpu-operator
- {{ .Release.Name }}-velero

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding

@@ -119,7 +119,7 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -42,7 +42,7 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -16,7 +16,7 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.17.1
version: 0.16.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -11,50 +11,7 @@ This managed service is controlled by the CloudNativePG operator, ensuring effic

- Docs: <https://cloudnative-pg.io/docs/>
- Github: <https://github.com/cloudnative-pg/cloudnative-pg>

## Operations

### How to enable backups

To back up a PostgreSQL application, an external S3-compatible storage is required.

To start regular backups, update the application, setting `backup.enabled` to `true`, and fill in the path and credentials in the `backup.*` parameters:

```yaml
## @param backup.enabled Enable regular backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.retentionPolicy Retention policy
## @param backup.destinationPath Path to store the backup (i.e. s3://bucket/path/to/folder)
## @param backup.endpointURL S3 Endpoint used to upload data to the cloud
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
backup:
enabled: false
retentionPolicy: 30d
destinationPath: s3://bucket/path/to/folder/
endpointURL: http://minio-gateway-service:9000
schedule: "0 2 * * * *"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
```
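
In Cozystack these values are set on the Postgres application itself; a minimal sketch, assuming the backup block sits under `spec` as in the test manifests earlier in this diff (the `mydb` name is illustrative and the credentials are the README's placeholders):

```bash
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: mydb
  namespace: tenant-test
spec:
  backup:
    enabled: true
    schedule: "0 2 * * * *"
    retentionPolicy: 30d
    destinationPath: s3://bucket/path/to/folder/
    endpointURL: http://minio-gateway-service:9000
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
EOF
```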

### How to recover a backup

CloudNativePG supports point-in-time recovery.
Recovering a backup is done by creating a new database instance and restoring the data in it.

Create a new PostgreSQL application with a different name, but identical configuration.
Set `bootstrap.enabled` to `true` and fill in the name of the database instance to recover from and the recovery time:

```yaml
## @param bootstrap.enabled Restore database cluster from a backup
## @param bootstrap.recoveryTime Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest
## @param bootstrap.oldName Name of database cluster before deleting
##
bootstrap:
enabled: false
recoveryTime: "" # leave empty for latest or exact timestamp; example: 2020-11-26 15:22:00.00000+00
oldName: "<previous-postgres-instance>"
```
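
A matching sketch for the recovery side, under the same assumptions (the `mydb-restore` name, the old name, and the timestamp are all illustrative):

```bash
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: mydb-restore
  namespace: tenant-test
spec:
  bootstrap:
    enabled: true
    oldName: mydb
    recoveryTime: "2020-11-26 15:22:00.00000+00"
EOF
```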

## HowTos

### How to switch primary/secondary replica

@@ -62,6 +19,24 @@ See:

- <https://cloudnative-pg.io/documentation/1.15/rolling_update/#manual-updates-supervised>

### How to restore backup

Find the snapshot:

```bash
restic -r s3:s3.example.org/postgres-backups/database_name snapshots
```

Restore it:

```bash
restic -r s3:s3.example.org/postgres-backups/database_name restore latest --target /tmp/
```

More details:

- <https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1>

## Parameters

### Common parameters

@@ -85,23 +60,23 @@ See:

### Backup parameters

| Name | Description | Value |
| ------------------------ | ---------------------------------------------------------- | ----------------------------------- |
| `backup.enabled` | Enable regular backups | `false` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * * *` |
| `backup.retentionPolicy` | Retention policy | `30d` |
| `backup.destinationPath` | Path to store the backup (i.e. s3://bucket/path/to/folder) | `s3://bucket/path/to/folder/` |
| `backup.endpointURL` | S3 Endpoint used to upload data to the cloud | `http://minio-gateway-service:9000` |
| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| Name | Description | Value |
| ------------------------ | -------------------------------------------------------------------- | ----------------------------------- |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * * *` |
| `backup.retentionPolicy` | The retention policy | `30d` |
| `backup.destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) | `s3://BUCKET_NAME/` |
| `backup.endpointURL` | Endpoint to be used to upload data to the cloud | `http://minio-gateway-service:9000` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |

### Bootstrap parameters

| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `bootstrap.enabled` | Restore database cluster from a backup | `false` |
| `bootstrap.recoveryTime` | Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest | `""` |
| `bootstrap.oldName` | Name of database cluster before deleting | `""` |
| `bootstrap.enabled` | Restore cluster from backup | `false` |
| `bootstrap.recoveryTime` | Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest | `""` |
| `bootstrap.oldName` | Name of cluster before deleting | `""` |
| `resources` | Explicit CPU and memory configuration for each PostgreSQL replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |

@@ -128,7 +103,7 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -38,7 +38,7 @@ stringData:

until pg_isready ; do sleep 5; done

echo "== create users"
{{- if and .Values.users (not (hasKey .Values.users "postgres")) }}
{{- if .Values.users }}
psql -v ON_ERROR_STOP=1 <<\EOT
{{- range $user, $u := .Values.users }}
SELECT 'CREATE ROLE "{{ $user }}" LOGIN INHERIT;'
@@ -47,8 +47,6 @@ stringData:
COMMENT ON ROLE "{{ $user }}" IS 'user managed by helm';
{{- end }}
EOT
{{- else if and .Values.users (hasKey .Values.users "postgres") }}
{{- fail "`users.postgres` is forbidden by policy. Use a different username." }}
{{- end }}

echo "== delete users"
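
The guard added above rejects a reserved role name at template render time; a sketch of values that pass and values that would trip the `fail` (the password field name is illustrative):

```yaml
# Accepted: ordinary application users.
users:
  app1:
    password: hackme
---
# Rejected at render time: `users.postgres` is forbidden by policy.
users:
  postgres:
    password: hackme
```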

@@ -62,7 +62,7 @@

"properties": {
"enabled": {
"type": "boolean",
"description": "Enable regular backups",
"description": "Enable periodic backups",
"default": false
},
"schedule": {
@@ -72,27 +72,27 @@
},
"retentionPolicy": {
"type": "string",
"description": "Retention policy",
"description": "The retention policy",
"default": "30d"
},
"destinationPath": {
"type": "string",
"description": "Path to store the backup (i.e. s3://bucket/path/to/folder)",
"default": "s3://bucket/path/to/folder/"
"description": "The path where to store the backup (i.e. s3://bucket/path/to/folder)",
"default": "s3://BUCKET_NAME/"
},
"endpointURL": {
"type": "string",
"description": "S3 Endpoint used to upload data to the cloud",
"description": "Endpoint to be used to upload data to the cloud",
"default": "http://minio-gateway-service:9000"
},
"s3AccessKey": {
"type": "string",
"description": "Access key for S3, used for authentication",
"description": "The access key for S3, used for authentication",
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
},
"s3SecretKey": {
"type": "string",
"description": "Secret key for S3, used for authentication",
"description": "The secret key for S3, used for authentication",
"default": "ju3eum4dekeich9ahM1te8waeGai0oog"
}
}
@@ -102,17 +102,17 @@
"properties": {
"enabled": {
"type": "boolean",
"description": "Restore database cluster from a backup",
"description": "Restore cluster from backup",
"default": false
},
"recoveryTime": {
"type": "string",
"description": "Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest",
"description": "Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest",
"default": ""
},
"oldName": {
"type": "string",
"description": "Name of database cluster before deleting",
"description": "Name of cluster before deleting",
"default": ""
}
}

@@ -59,17 +59,17 @@ databases: {}

## @section Backup parameters

## @param backup.enabled Enable regular backups
## @param backup.enabled Enable periodic backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.retentionPolicy Retention policy
## @param backup.destinationPath Path to store the backup (i.e. s3://bucket/path/to/folder)
## @param backup.endpointURL S3 Endpoint used to upload data to the cloud
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
## @param backup.retentionPolicy The retention policy
## @param backup.destinationPath The path where to store the backup (i.e. s3://bucket/path/to/folder)
## @param backup.endpointURL Endpoint to be used to upload data to the cloud
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
backup:
enabled: false
retentionPolicy: 30d
destinationPath: s3://bucket/path/to/folder/
destinationPath: s3://BUCKET_NAME/
endpointURL: http://minio-gateway-service:9000
schedule: "0 2 * * * *"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
@@ -77,9 +77,9 @@ backup:

## @section Bootstrap parameters

## @param bootstrap.enabled Restore database cluster from a backup
## @param bootstrap.recoveryTime Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest
## @param bootstrap.oldName Name of database cluster before deleting
## @param bootstrap.enabled Restore cluster from backup
## @param bootstrap.recoveryTime Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest
## @param bootstrap.oldName Name of cluster before deleting
##
bootstrap:
enabled: false

@@ -45,6 +45,6 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -52,6 +52,6 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -54,9 +54,7 @@ kafka 0.7.0 6358fd7a

kafka 0.7.1 4369b031
kafka 0.8.0 HEAD
kubernetes 0.24.0 62cb694d
kubernetes 0.25.0 70f82667
kubernetes 0.25.1 acd4663a
kubernetes 0.25.2 HEAD
kubernetes 0.25.0 HEAD
mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e
@@ -103,9 +101,7 @@ postgres 0.12.0 6130f43d
postgres 0.12.1 632224a3
postgres 0.14.0 62cb694d
postgres 0.15.1 4369b031
postgres 0.16.0 70f82667
postgres 0.17.0 acd4663a
postgres 0.17.1 HEAD
postgres 0.16.0 HEAD
rabbitmq 0.1.0 263e47be
rabbitmq 0.2.0 53f2365e
rabbitmq 0.3.0 6c5cf5bf
@@ -157,8 +153,7 @@ virtual-machine 0.9.1 93bdf411
virtual-machine 0.10.0 6130f43d
virtual-machine 0.10.2 632224a3
virtual-machine 0.11.0 4369b031
virtual-machine 0.12.0 70f82667
virtual-machine 0.12.1 HEAD
virtual-machine 0.12.0 HEAD
vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 6130f43d
vm-disk 0.1.2 632224a3
@@ -175,8 +170,7 @@ vm-instance 0.6.0 721c12a7
vm-instance 0.7.0 6130f43d
vm-instance 0.7.2 632224a3
vm-instance 0.8.0 4369b031
vm-instance 0.9.0 70f82667
vm-instance 0.10.0 HEAD
vm-instance 0.9.0 HEAD
vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf

@@ -17,7 +17,7 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.12.1
version: 0.12.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -3,13 +3,6 @@ kind: Role

metadata:
name: {{ .Release.Name }}-dashboard-resources
rules:
- apiGroups:
- ""
resources:
- services
resourceNames:
- {{ include "virtual-machine.fullname" . }}
verbs: ["get", "list", "watch"]
- apiGroups:
- cozystack.io
resources:

@@ -9,7 +9,7 @@ stringData:

key{{ $k }}: {{ quote $v }}
{{- end }}
{{- end }}
{{- if or .Values.cloudInit .Values.sshKeys }}
{{- if .Values.cloudInit }}
---
apiVersion: v1
kind: Secret
@@ -17,17 +17,5 @@ metadata:
name: {{ include "virtual-machine.fullname" . }}-cloud-init
stringData:
userdata: |
{{- if .Values.cloudInit }}
{{- .Values.cloudInit | nindent 4 }}
{{- else if and (.Values.sshKeys) (not .Values.cloudInit) }}
{{- /*
We usually provide ssh keys in cloud-init metadata, because userdata is not typed and can be used for any purpose.
However, if the user provides ssh keys but not cloud-init, we still need to provide a minimal cloud-init config to avoid errors.
*/}}
#cloud-config
ssh_authorized_keys:
{{- range .Values.sshKeys }}
- {{ quote . }}
{{- end }}
{{- end }}
{{- .Values.cloudInit | nindent 4 }}
{{- end }}
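
When only `sshKeys` is set, the fallback branch above renders a minimal cloud-config rather than empty userdata; roughly (key value abbreviated, exact whitespace depends on the template engine):

```yaml
userdata: |
  #cloud-config
  ssh_authorized_keys:
    - "ssh-ed25519 AAAAC3... test@test"
```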

@@ -92,7 +92,7 @@ spec:

- disk:
bus: scsi
name: systemdisk
{{- if or .Values.cloudInit .Values.sshKeys }}
{{- if .Values.sshKeys }}
- disk:
bus: virtio
name: cloudinitdisk
@@ -122,11 +122,28 @@ spec:
- name: systemdisk
dataVolume:
name: {{ include "virtual-machine.fullname" . }}
{{- if or .Values.cloudInit .Values.sshKeys }}

{{- if and .Values.sshKeys .Values.cloudInit }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ include "virtual-machine.fullname" . }}-cloud-init
{{- else if .Values.sshKeys }}
- name: cloudinitdisk
cloudInitNoCloud:
userData: |
{{ printf "%s" "#cloud-config" }}
ssh_authorized_keys:
{{- range .Values.sshKeys }}
- {{ . }}
{{- end }}
chpasswd:
expire: false
{{- else }}
- name: cloudinitdisk
cloudInitNoCloud:
userData: |
{{ printf "%s" "#cloud-config" }}
{{- end }}

networks:

@@ -17,10 +17,10 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.10.0
version: 0.9.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 0.10.0
appVersion: 0.8.0

@@ -3,13 +3,6 @@ kind: Role

metadata:
name: {{ .Release.Name }}-dashboard-resources
rules:
- apiGroups:
- ""
resources:
- services
resourceNames:
- {{ include "virtual-machine.fullname" . }}
verbs: ["get", "list", "watch"]
- apiGroups:
- cozystack.io
resources:

@@ -9,7 +9,7 @@ stringData:

key{{ $k }}: {{ quote $v }}
{{- end }}
{{- end }}
{{- if or .Values.cloudInit .Values.sshKeys }}
{{- if .Values.cloudInit }}
---
apiVersion: v1
kind: Secret
@@ -17,17 +17,5 @@ metadata:
name: {{ include "virtual-machine.fullname" . }}-cloud-init
stringData:
userdata: |
{{- if .Values.cloudInit }}
{{- .Values.cloudInit | nindent 4 }}
{{- else if and (.Values.sshKeys) (not .Values.cloudInit) }}
{{- /*
We usually provide ssh keys in cloud-init metadata, because userdata is not typed and can be used for any purpose.
However, if the user provides ssh keys but not cloud-init, we still need to provide a minimal cloud-init config to avoid errors.
*/}}
#cloud-config
ssh_authorized_keys:
{{- range .Values.sshKeys }}
- {{ quote . }}
{{- end }}
{{- end }}
{{- .Values.cloudInit | nindent 4 }}
{{- end }}

@@ -54,24 +54,24 @@ spec:

disks:
{{- range $i, $disk := .Values.disks }}
- name: disk-{{ $disk.name }}
{{- $dv := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
{{- if $dv }}
{{- if and (hasKey $dv.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $dv.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
cdrom:
{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
{{- if $disk }}
{{- if and (hasKey $disk.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $disk.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
cdrom: {}
{{- else }}
disk:
disk: {}
{{- end }}
{{- if eq $i 0 }}
bootOrder: 1
{{- end }}
{{- with $disk.bus }}
bus: {{ . }}
{{- end }}
bootOrder: {{ add $i 1 }}
{{- else }}
{{- fail (printf "Specified disk does not exist in cluster: %s" .name) }}
{{- end }}
{{- end }}
{{- if or .Values.cloudInit .Values.sshKeys }}
{{- if or .Values.sshKeys .Values.cloudInit }}
- name: cloudinitdisk
disk: {}
disk:
bus: virtio
{{- end }}
interfaces:
- name: default

@@ -95,11 +95,27 @@ spec:

dataVolume:
name: vm-disk-{{ .name }}
{{- end }}
{{- if or .Values.cloudInit .Values.sshKeys }}
{{- if and .Values.sshKeys .Values.cloudInit }}
- name: cloudinitdisk
cloudInitNoCloud:
secretRef:
name: {{ include "virtual-machine.fullname" . }}-cloud-init
{{- else if .Values.sshKeys }}
- name: cloudinitdisk
cloudInitNoCloud:
userData: |
{{ printf "%s" "#cloud-config" }}
ssh_authorized_keys:
{{- range .Values.sshKeys }}
- {{ . }}
{{- end }}
chpasswd:
expire: false
{{- else }}
- name: cloudinitdisk
cloudInitNoCloud:
userData: |
{{ printf "%s" "#cloud-config" }}
{{- end }}
networks:
- name: default

@@ -22,7 +22,6 @@ instanceProfile: ubuntu

## disks:
## - name: example-system
## - name: example-data
## bus: sata
disks: []

## @param gpus [array] List of GPUs to attach

@@ -56,7 +56,7 @@ This setting is ignored if the corresponding `resources` value is set.

| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/cozystack/cozystack/installer:v0.33.1@sha256:03a0002be9cf5926643c295bbf05c3e250401b0f0595b9fcd147d53534f368f5
image: ghcr.io/cozystack/cozystack/installer:v0.33.0@sha256:6cdc5d9062b536929152214e8a6a6b8096b64a17592e04a3633f58d21ff43a63

@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.33.1@sha256:eed183a4104b1c142f6c4a358338749efe73baefddd53d7fe4c7149ecb892ce1
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.33.0@sha256:fd169ae7ee7b0b10ee34f02353ae96c182ca7b6cede771c8fc6539894416104f

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.33.1@sha256:ca3638c620215ace26ace3f7e8b27391847ab2158b5a67f070f43dcbea071532
ghcr.io/cozystack/cozystack/matchbox:v0.33.0@sha256:adc133234a48f3496441334348aeab400ee29b8514129c110b892fa1e0dff1d8

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:b748d9add5fc4080b143d8690ca1ad851d911948ac8eb296dd9005d53d153c05
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:2759763d35ba35144ba10ba4d2b9effd875f4f0d01d9694b010f491ba6eb6d46

@@ -1,2 +1,2 @@
cozystackAPI:
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.33.1@sha256:ee6b71d3ab1c1484490ff1dc57a7df82813c4f18d6393f149d32acf656aa779d
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.33.0@sha256:d9bee0e9f73a950784e43d907552c21044d01eed728e1185455308e49d00c00d

@@ -1,5 +1,5 @@
cozystackController:
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.33.1@sha256:4777488e14f0313b153b153388c78ab89e3a39582c30266f2321704df1976922
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.33.0@sha256:a1fceb277007846bc85ceee0afd1f5d1122496174203c718c1275a1038cb07f6
debug: false
disableTelemetry: false
cozystackVersion: "v0.33.1"
cozystackVersion: "v0.33.0"

@@ -76,7 +76,7 @@ data:

"kubeappsNamespace": {{ .Release.Namespace | quote }},
"helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
"carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
"appVersion": "v0.33.1",
"appVersion": "v0.33.0",
"authProxyEnabled": {{ .Values.authProxy.enabled }},
"oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
"oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},

@@ -1,7 +1,7 @@

FROM bitnami/node:20.15.1 AS build
WORKDIR /app

ARG COMMIT_REF=e1382f51c6db1bca0a8ecd454407c8e282fe0243
ARG COMMIT_REF=6856b66f9244ef1b2703a2f30899366e0ba040de
RUN wget -O- https://github.com/cozystack/kubeapps/archive/${COMMIT_REF}.tar.gz | tar xzf - --strip-components=2 kubeapps-${COMMIT_REF}/dashboard

RUN yarn install --frozen-lockfile

@@ -4,7 +4,7 @@

# syntax = docker/dockerfile:1

FROM alpine AS source
ARG COMMIT_REF=e1382f51c6db1bca0a8ecd454407c8e282fe0243
ARG COMMIT_REF=6856b66f9244ef1b2703a2f30899366e0ba040de
RUN apk add --no-cache patch
WORKDIR /source
RUN wget -O- https://github.com/cozystack/kubeapps/archive/${COMMIT_REF}.tar.gz | tar xzf - --strip-components=1

@@ -19,7 +19,7 @@ kubeapps:

image:
registry: ghcr.io/cozystack/cozystack
repository: dashboard
tag: v0.33.1
tag: v0.33.0
digest: "sha256:5e514516bd3dc0c693bb346ddeb9740e0439a59deb2a56b87317286e3ce79ac9"
redis:
master:
@@ -37,8 +37,8 @@ kubeapps:
image:
registry: ghcr.io/cozystack/cozystack
repository: kubeapps-apis
tag: v0.33.1
digest: "sha256:ea5b21a27c97b14880042d2a642670e3461e7d946c65b5b557d2eb8df9f03a87"
tag: v0.33.0
digest: "sha256:8c60134b9216e0cd8ffc044c14c872b76c1a95879b4cf7887541980ade9e8c65"
pluginConfig:
flux:
packages:

packages/system/hetzner-ccm/Chart.yaml (new file, 2 lines)
@@ -0,0 +1,2 @@
name: hetzner-ccm
version: 1.26.0 # Placeholder, the actual version will be automatically set during the build process

packages/system/hetzner-ccm/Makefile (new file, 10 lines)
@@ -0,0 +1,10 @@
export NAME=hetzner-ccm
export NAMESPACE=kube-system

include ../../../scripts/package.mk

update:
rm -rf charts
helm repo add hcloud https://charts.hetzner.cloud
helm repo update hcloud
helm pull hcloud/hcloud-cloud-controller-manager --untar --untardir charts
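
A likely invocation of the new `update` target, assuming the repository layout implied by the `include` path:

```bash
# Re-vendor the upstream hcloud-cloud-controller-manager chart into charts/.
make -C packages/system/hetzner-ccm update
```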

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,96 @@
---
# Source: hcloud-cloud-controller-manager/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: hcloud-cloud-controller-manager
namespace: kube-system
---
# Source: hcloud-cloud-controller-manager/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: "system:hcloud-cloud-controller-manager"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: hcloud-cloud-controller-manager
namespace: kube-system
---
# Source: hcloud-cloud-controller-manager/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hcloud-cloud-controller-manager
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app.kubernetes.io/instance: 'hcloud-hccm'
app.kubernetes.io/name: 'hcloud-cloud-controller-manager'
template:
metadata:
labels:
app.kubernetes.io/instance: 'hcloud-hccm'
app.kubernetes.io/name: 'hcloud-cloud-controller-manager'
spec:
serviceAccountName: hcloud-cloud-controller-manager
dnsPolicy: Default
tolerations:
# Allow HCCM itself to schedule on nodes that have not yet been initialized by HCCM.
- key: "node.cloudprovider.kubernetes.io/uninitialized"
value: "true"
effect: "NoSchedule"
- key: "CriticalAddonsOnly"
operator: "Exists"

# Allow HCCM to schedule on control plane nodes.
- key: "node-role.kubernetes.io/master"
effect: NoSchedule
operator: Exists
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
operator: Exists

- key: "node.kubernetes.io/not-ready"
effect: "NoExecute"
containers:
- name: hcloud-cloud-controller-manager
args:
- "--allow-untagged-cloud"
- "--cloud-provider=hcloud"
- "--route-reconciliation-period=30s"
- "--webhook-secure-port=0"
- "--leader-elect=false"
env:
- name: HCLOUD_TOKEN
valueFrom:
secretKeyRef:
key: token
name: hcloud
- name: ROBOT_PASSWORD
valueFrom:
secretKeyRef:
key: robot-password
name: hcloud
optional: true
- name: ROBOT_USER
valueFrom:
secretKeyRef:
key: robot-user
name: hcloud
optional: true
image: docker.io/hetznercloud/hcloud-cloud-controller-manager:v1.26.0 # x-releaser-pleaser-version
ports:
- name: metrics
containerPort: 8233
resources:
requests:
cpu: 100m
memory: 50Mi
priorityClassName: "system-cluster-critical"
@@ -0,0 +1,113 @@
---
# Source: hcloud-cloud-controller-manager/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: hcloud-cloud-controller-manager
  namespace: kube-system
---
# Source: hcloud-cloud-controller-manager/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: "system:hcloud-cloud-controller-manager"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: hcloud-cloud-controller-manager
    namespace: kube-system
---
# Source: hcloud-cloud-controller-manager/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hcloud-cloud-controller-manager
  namespace: kube-system
spec:
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app.kubernetes.io/instance: 'hcloud-hccm'
      app.kubernetes.io/name: 'hcloud-cloud-controller-manager'
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: 'hcloud-hccm'
        app.kubernetes.io/name: 'hcloud-cloud-controller-manager'
        pod-label: pod-label
      annotations:
        pod-annotation: pod-annotation
    spec:
      serviceAccountName: hcloud-cloud-controller-manager
      dnsPolicy: Default
      tolerations:
        # Allow HCCM itself to schedule on nodes that have not yet been initialized by HCCM.
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"

        # Allow HCCM to schedule on control plane nodes.
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
          operator: Exists
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
          operator: Exists

        - key: "node.kubernetes.io/not-ready"
          effect: "NoExecute"

        - effect: NoSchedule
          key: example-key
          operator: Exists
      nodeSelector:
        foo: bar
      containers:
        - name: hcloud-cloud-controller-manager
          command:
            - "/bin/hcloud-cloud-controller-manager"
            - "--allow-untagged-cloud"
            - "--cloud-provider=hcloud"
            - "--route-reconciliation-period=30s"
            - "--webhook-secure-port=0"
          env:
            - name: HCLOUD_TOKEN
              valueFrom:
                secretKeyRef:
                  key: token
                  name: hcloud
            - name: ROBOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: robot-password
                  name: hcloud
                  optional: true
            - name: ROBOT_USER
              valueFrom:
                secretKeyRef:
                  key: robot-user
                  name: hcloud
                  optional: true
          image: docker.io/hetznercloud/hcloud-cloud-controller-manager:v1.26.0 # x-releaser-pleaser-version
          ports:
            - name: metrics
              containerPort: 8233
          resources:
            requests:
              cpu: 100m
              memory: 50Mi
          volumeMounts:
            - mountPath: /var/run/secrets/hcloud
              name: token-volume
              readOnly: true
      priorityClassName: system-cluster-critical
      volumes:
        - name: token-volume
          secret:
            secretName: hcloud-token

@@ -0,0 +1,51 @@
kind: DaemonSet

monitoring:
  podMonitor:
    labels:
      environment: staging
    annotations:
      release: kube-prometheus-stack

additionalTolerations:
  - key: "example-key"
    operator: "Exists"
    effect: "NoSchedule"

nodeSelector:
  foo: bar

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: topology.kubernetes.io/zone
              operator: In
              values:
                - antarctica-east1
                - antarctica-west1
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
            - key: another-node-label-key
              operator: In
              values:
                - another-node-label-value

podLabels:
  pod-label: pod-label

podAnnotations:
  pod-annotation: pod-annotation

extraVolumeMounts:
  - name: token-volume
    readOnly: true
    mountPath: /var/run/secrets/hcloud

extraVolumes:
  - name: token-volume
    secret:
      secretName: hcloud-token

@@ -0,0 +1,4 @@
apiVersion: v2
name: hcloud-cloud-controller-manager
type: application
version: 1.26.0

@@ -0,0 +1,61 @@
# hcloud-cloud-controller-manager Helm Chart

This Helm chart is the recommended installation method for [hcloud-cloud-controller-manager](https://github.com/hetznercloud/hcloud-cloud-controller-manager).

## Quickstart

First, [install Helm 3](https://helm.sh/docs/intro/install/).

The following snippet will deploy hcloud-cloud-controller-manager to the kube-system namespace.

```sh
# Sync the Hetzner Cloud helm chart repository to your local computer.
helm repo add hcloud https://charts.hetzner.cloud
helm repo update hcloud

# Install the latest version of the hcloud-cloud-controller-manager chart.
helm install hccm hcloud/hcloud-cloud-controller-manager -n kube-system

# If you want to install hccm with private networking support (see main Deployment guide for more info).
helm install hccm hcloud/hcloud-cloud-controller-manager -n kube-system --set networking.enabled=true
```

Please note that additional configuration is necessary. See the main [Deployment](https://github.com/hetznercloud/hcloud-cloud-controller-manager#deployment) guide.
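
For instance, the chart's default values (shown later in this diff) read the Hetzner Cloud API token from a Secret named `hcloud` with key `token`. A minimal sketch of creating that Secret, with a placeholder token value:

```sh
# Hypothetical example: create the Secret referenced by env.HCLOUD_TOKEN in values.yaml.
# Replace <your-hcloud-api-token> with a real API token from the Hetzner Cloud Console.
kubectl -n kube-system create secret generic hcloud \
  --from-literal=token=<your-hcloud-api-token>
```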

If you're unfamiliar with Helm, it's worth browsing the documentation. Perhaps start with the [Quickstart Guide](https://helm.sh/docs/intro/quickstart/)?

### Upgrading from static manifests

If you previously installed hcloud-cloud-controller-manager with this command:

```sh
kubectl apply -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm.yaml
```

You can uninstall that same deployment by running the following command:

```sh
kubectl delete -f https://github.com/hetznercloud/hcloud-cloud-controller-manager/releases/latest/download/ccm.yaml
```

Then you can follow the Quickstart installation steps above.

## Configuration

This chart aims to be highly flexible. Please review the [values.yaml](./values.yaml) for a full list of configuration options.

If you've already deployed hccm using the `helm install` command above, you can easily change configuration values:

```sh
helm upgrade hccm hcloud/hcloud-cloud-controller-manager -n kube-system --set monitoring.podMonitor.enabled=true
```

### Multiple replicas / DaemonSet

You can choose between different deployment options. By default, the chart will deploy a single replica as a Deployment.

If you want to change the replica count, you can adjust the value `replicaCount` inside the helm values.
If you have more than 1 replica, leader election will be turned on automatically.

If you want to deploy hccm as a DaemonSet, you can set `kind` to `DaemonSet` inside the values (see the sketch after this section).
To adjust on which nodes the DaemonSet should be deployed, you can use the `nodeSelector` and `additionalTolerations` values.
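
For example, building on the values shown above, switching an existing release to a DaemonSet pinned to control-plane nodes could look like this (a sketch; the override file name is arbitrary):

```sh
# Write a small values override using keys documented in values.yaml
# (kind, nodeSelector), then upgrade the release in place.
cat > hccm-daemonset-values.yaml <<EOF
kind: DaemonSet
nodeSelector:
  node-role.kubernetes.io/control-plane: ""
EOF
helm upgrade hccm hcloud/hcloud-cloud-controller-manager -n kube-system -f hccm-daemonset-values.yaml
```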
@@ -0,0 +1,5 @@
{{ if (and $.Values.monitoring.enabled $.Values.monitoring.podMonitor.enabled) }}
{{ if not ($.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/PodMonitor") }}
WARNING: monitoring.podMonitor.enabled=true but PodMonitor could not be installed: the CRD was not detected.
{{ end }}
{{ end }}

@@ -0,0 +1,7 @@
{{- define "hcloud-cloud-controller-manager.name" -}}
{{- $.Values.nameOverride | default $.Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}

{{- define "hcloud-cloud-controller-manager.selectorLabels" -}}
{{- tpl (toYaml $.Values.selectorLabels) $ }}
{{- end }}

@@ -0,0 +1,14 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: "system:{{ include "hcloud-cloud-controller-manager.name" . }}"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: {{ include "hcloud-cloud-controller-manager.name" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}

@@ -0,0 +1,108 @@
{{- if eq $.Values.kind "DaemonSet" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ include "hcloud-cloud-controller-manager.name" . }}
  namespace: {{ .Release.Namespace }}
spec:
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      {{- include "hcloud-cloud-controller-manager.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "hcloud-cloud-controller-manager.selectorLabels" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{- toYaml .Values.podLabels | nindent 8 }}
        {{- end }}
      {{- if .Values.podAnnotations }}
      annotations:
        {{- toYaml .Values.podAnnotations | nindent 8 }}
      {{- end }}
    spec:
      {{- with .Values.image.pullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "hcloud-cloud-controller-manager.name" . }}
      dnsPolicy: Default
      tolerations:
        # Allow HCCM itself to schedule on nodes that have not yet been initialized by HCCM.
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"

        # Allow HCCM to schedule on control plane nodes.
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
          operator: Exists
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
          operator: Exists

        - key: "node.kubernetes.io/not-ready"
          effect: "NoExecute"

        {{- if gt (len .Values.additionalTolerations) 0 }}
        {{ toYaml .Values.additionalTolerations | nindent 8 }}
        {{- end }}

      {{- if gt (len .Values.nodeSelector) 0 }}
      nodeSelector:
        {{ toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}

      {{- if $.Values.networking.enabled }}
      hostNetwork: true
      {{- end }}
      containers:
        - name: hcloud-cloud-controller-manager
          command:
            - "/bin/hcloud-cloud-controller-manager"
            {{- range $key, $value := $.Values.args }}
            {{- if not (eq $value nil) }}
            - "--{{ $key }}{{ if $value }}={{ $value }}{{ end }}"
            {{- end }}
            {{- end }}
            {{- if $.Values.networking.enabled }}
            - "--allocate-node-cidrs=true"
            - "--cluster-cidr={{ $.Values.networking.clusterCIDR }}"
            {{- end }}
          env:
            {{- range $key, $value := $.Values.env }}
            - name: {{ $key }}
              {{- tpl (toYaml $value) $ | nindent 14 }}
            {{- end }}
            {{- if $.Values.networking.enabled }}
            - name: HCLOUD_NETWORK
              {{- tpl (toYaml $.Values.networking.network) $ | nindent 14 }}
            {{- end }}
            {{- if not $.Values.monitoring.enabled }}
            - name: HCLOUD_METRICS_ENABLED
              value: "false"
            {{- end }}
            {{- if $.Values.robot.enabled }}
            - name: ROBOT_ENABLED
              value: "true"
            {{- end }}
          image: {{ $.Values.image.repository }}:{{ tpl $.Values.image.tag . }} # x-releaser-pleaser-version
          ports:
            {{- if $.Values.monitoring.enabled }}
            - name: metrics
              containerPort: 8233
            {{- end }}
          resources:
            {{- toYaml $.Values.resources | nindent 12 }}
          {{- with .Values.extraVolumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      priorityClassName: system-cluster-critical
      {{- with .Values.extraVolumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}

@@ -0,0 +1,118 @@
{{- if eq $.Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "hcloud-cloud-controller-manager.name" . }}
  namespace: {{ .Release.Namespace }}
spec:
  replicas: {{ .Values.replicaCount }}
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      {{- include "hcloud-cloud-controller-manager.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "hcloud-cloud-controller-manager.selectorLabels" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{- toYaml .Values.podLabels | nindent 8 }}
        {{- end }}
      {{- if .Values.podAnnotations }}
      annotations:
        {{- toYaml .Values.podAnnotations | nindent 8 }}
      {{- end }}
    spec:
      {{- with .Values.image.pullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "hcloud-cloud-controller-manager.name" . }}
      dnsPolicy: Default
      tolerations:
        # Allow HCCM itself to schedule on nodes that have not yet been initialized by HCCM.
        - key: "node.cloudprovider.kubernetes.io/uninitialized"
          value: "true"
          effect: "NoSchedule"
        - key: "CriticalAddonsOnly"
          operator: "Exists"

        # Allow HCCM to schedule on control plane nodes.
        - key: "node-role.kubernetes.io/master"
          effect: NoSchedule
          operator: Exists
        - key: "node-role.kubernetes.io/control-plane"
          effect: NoSchedule
          operator: Exists

        - key: "node.kubernetes.io/not-ready"
          effect: "NoExecute"

        {{- if gt (len .Values.additionalTolerations) 0 }}
        {{ toYaml .Values.additionalTolerations | nindent 8 }}
        {{- end }}

      {{- if gt (len .Values.nodeSelector) 0 }}
      nodeSelector:
        {{ toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}

      {{- if gt (len .Values.affinity) 0 }}
      affinity:
        {{ toYaml .Values.affinity | nindent 8 }}
      {{- end }}

      {{- if $.Values.networking.enabled }}
      hostNetwork: true
      {{- end }}
      containers:
        - name: hcloud-cloud-controller-manager
          args:
            {{- range $key, $value := $.Values.args }}
            {{- if not (eq $value nil) }}
            - "--{{ $key }}{{ if $value }}={{ $value }}{{ end }}"
            {{- end }}
            {{- end }}
            {{- if $.Values.networking.enabled }}
            - "--allocate-node-cidrs=true"
            - "--cluster-cidr={{ $.Values.networking.clusterCIDR }}"
            {{- end }}
            {{- if (eq (int $.Values.replicaCount) 1) }}
            - "--leader-elect=false"
            {{- end }}
          env:
            {{- range $key, $value := $.Values.env }}
            - name: {{ $key }}
              {{- tpl (toYaml $value) $ | nindent 14 }}
            {{- end }}
            {{- if $.Values.networking.enabled }}
            - name: HCLOUD_NETWORK
              {{- tpl (toYaml $.Values.networking.network) $ | nindent 14 }}
            {{- end }}
            {{- if not $.Values.monitoring.enabled }}
            - name: HCLOUD_METRICS_ENABLED
              value: "false"
            {{- end }}
            {{- if $.Values.robot.enabled }}
            - name: ROBOT_ENABLED
              value: "true"
            {{- end }}
          image: {{ $.Values.image.repository }}:{{ tpl $.Values.image.tag . }} # x-releaser-pleaser-version
          ports:
            {{- if $.Values.monitoring.enabled }}
            - name: metrics
              containerPort: 8233
            {{- end }}
          resources:
            {{- toYaml $.Values.resources | nindent 12 }}
          {{- with .Values.extraVolumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName | quote }}
      {{- end }}
      {{- with .Values.extraVolumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}

@@ -0,0 +1,22 @@
{{ if (and $.Values.monitoring.enabled $.Values.monitoring.podMonitor.enabled) }}
{{ if $.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/PodMonitor" }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: {{ include "hcloud-cloud-controller-manager.name" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- with $.Values.monitoring.podMonitor.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  annotations:
    {{- range $key, $value := .Values.monitoring.podMonitor.annotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
spec:
  {{- tpl (toYaml $.Values.monitoring.podMonitor.spec) $ | nindent 2 }}
  selector:
    matchLabels:
      {{- include "hcloud-cloud-controller-manager.selectorLabels" . | nindent 6 }}
{{ end }}
{{ end }}

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "hcloud-cloud-controller-manager.name" . }}
  namespace: {{ .Release.Namespace }}

@@ -0,0 +1,154 @@
# hccm program command line arguments.
# The following flags are managed by the chart and should *not* be set directly here:
# --allocate-node-cidrs
# --cluster-cidr
# --leader-elect
args:
  cloud-provider: hcloud
  allow-untagged-cloud: ""

  # Read issue #395 to understand how changes to this value affect you.
  # https://github.com/hetznercloud/hcloud-cloud-controller-manager/issues/395
  route-reconciliation-period: 30s

  # We do not use the webhooks feature and there is no need to bind a port that is unused.
  # https://github.com/kubernetes/kubernetes/issues/120043
  # https://github.com/hetznercloud/hcloud-cloud-controller-manager/issues/492
  webhook-secure-port: "0"

# Change deployment kind from "Deployment" to "DaemonSet"
kind: Deployment

# change replicaCount (only used when kind is "Deployment")
replicaCount: 1

# hccm environment variables
env:
  # The following variables are managed by the chart and should *not* be set here:
  # HCLOUD_METRICS_ENABLED - see monitoring.enabled
  # HCLOUD_NETWORK - see networking.enabled
  # ROBOT_ENABLED - see robot.enabled

  # You can also use a file to provide secrets to the hcloud-cloud-controller-manager.
  # This is currently possible for HCLOUD_TOKEN, ROBOT_USER, and ROBOT_PASSWORD.
  # Use the env var appended with _FILE (e.g. HCLOUD_TOKEN_FILE) and set the value to the file path that should be read
  # The file must be provided externally (e.g. via secret injection).
  # Example:
  # HCLOUD_TOKEN_FILE:
  #   value: "/etc/hetzner/token"
  # to disable reading the token from the secret you have to disable the original env var:
  # HCLOUD_TOKEN: null

  HCLOUD_TOKEN:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: token

  ROBOT_USER:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: robot-user
        optional: true
  ROBOT_PASSWORD:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: robot-password
        optional: true

image:
  repository: docker.io/hetznercloud/hcloud-cloud-controller-manager
  tag: "v{{ $.Chart.Version }}"
  # Optionally specify an array of imagePullSecrets.
  # Secrets must be manually created in the namespace.
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  # e.g:
  # pullSecrets:
  #   - myRegistryKeySecretName
  #
  pullSecrets: []

monitoring:
  # When enabled, the hccm Pod will serve metrics on port :8233
  enabled: true
  podMonitor:
    # When enabled (and metrics.enabled=true), a PodMonitor will be deployed to scrape metrics.
    # The PodMonitor [1] CRD must already exist in the target cluster.
    enabled: false
    # PodMonitor Labels
    labels: {}
    # release: kube-prometheus-stack
    # PodMonitor Annotations
    annotations: {}
    # PodMonitorSpec to be deployed. The "selector" field is set elsewhere and should *not* be used here.
    # https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitorSpec
    spec:
      podMetricsEndpoints:
        - port: metrics

nameOverride: ~

networking:
  # If enabled, hcloud-ccm will be deployed with networking support.
  enabled: false
  # If networking is enabled, clusterCIDR must match the PodCIDR subnet your cluster has been configured with.
  # The default "10.244.0.0/16" assumes you're using Flannel with default configuration.
  clusterCIDR: 10.244.0.0/16
  network:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: network

# Resource requests for the deployed hccm Pod.
resources:
  requests:
    cpu: 100m
    memory: 50Mi

selectorLabels:
  app.kubernetes.io/name: '{{ include "hcloud-cloud-controller-manager.name" $ }}'
  app.kubernetes.io/instance: "{{ $.Release.Name }}"

additionalTolerations: []

# nodeSelector:
#   node-role.kubernetes.io/control-plane: ""
nodeSelector: {}

# Set the affinity for pods. (Only works with kind=Deployment)
affinity: {}

# pods priorityClassName
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption
priorityClassName: "system-cluster-critical"

robot:
  # Set to true to enable support for Robot (Dedicated) servers.
  enabled: false

rbac:
  # Create a cluster role binding with admin access for the service account.
  create: true

podLabels: {}

podAnnotations: {}

# Mounts the specified volume to the hcloud-cloud-controller-manager container.
extraVolumeMounts: []
# # Example
# extraVolumeMounts:
#   - name: token-volume
#     readOnly: true
#     mountPath: /var/run/secrets/hcloud

# Adds extra volumes to the pod.
extraVolumes: []
# # Example
# extraVolumes:
#   - name: token-volume
#     secret:
#       secretName: hcloud-token

packages/system/hetzner-ccm/values.yaml
@@ -0,0 +1,172 @@
# hccm program command line arguments.
# The following flags are managed by the chart and should *not* be set directly here:
# --allocate-node-cidrs
# --cluster-cidr
# --leader-elect
args:
  cloud-provider: hcloud
  allow-untagged-cloud: ""

  # Read issue #395 to understand how changes to this value affect you.
  # https://github.com/hetznercloud/hcloud-cloud-controller-manager/issues/395
  route-reconciliation-period: 30s

  # We do not use the webhooks feature and there is no need to bind a port that is unused.
  # https://github.com/kubernetes/kubernetes/issues/120043
  # https://github.com/hetznercloud/hcloud-cloud-controller-manager/issues/492
  webhook-secure-port: "0"

# Change deployment kind from "Deployment" to "DaemonSet"
kind: Deployment

# change replicaCount (only used when kind is "Deployment")
replicaCount: 1

# hccm environment variables
env:
  # The following variables are managed by the chart and should *not* be set here:
  # HCLOUD_METRICS_ENABLED - see monitoring.enabled
  # HCLOUD_NETWORK - see networking.enabled
  # ROBOT_ENABLED - see robot.enabled

  # You can also use a file to provide secrets to the hcloud-cloud-controller-manager.
  # This is currently possible for HCLOUD_TOKEN, ROBOT_USER, and ROBOT_PASSWORD.
  # Use the env var appended with _FILE (e.g. HCLOUD_TOKEN_FILE) and set the value to the file path that should be read
  # The file must be provided externally (e.g. via secret injection).
  # Example:
  # HCLOUD_TOKEN_FILE:
  #   value: "/etc/hetzner/token"
  # to disable reading the token from the secret you have to disable the original env var:
  # HCLOUD_TOKEN: null

  HCLOUD_TOKEN:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: token

  ROBOT_USER:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: robot-user
        optional: true
  ROBOT_PASSWORD:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: robot-password
        optional: true

image:
  repository: docker.io/hetznercloud/hcloud-cloud-controller-manager
  tag: "v{{ $.Chart.Version }}"
  # Optionally specify an array of imagePullSecrets.
  # Secrets must be manually created in the namespace.
  # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  # e.g:
  # pullSecrets:
  #   - myRegistryKeySecretName
  #
  pullSecrets: []

monitoring:
  # When enabled, the hccm Pod will serve metrics on port :8233
  enabled: false
  podMonitor:
    # When enabled (and metrics.enabled=true), a PodMonitor will be deployed to scrape metrics.
    # The PodMonitor [1] CRD must already exist in the target cluster.
    enabled: false
    # PodMonitor Labels
    labels: {}
    # release: kube-prometheus-stack
    # PodMonitor Annotations
    annotations: {}
    # PodMonitorSpec to be deployed. The "selector" field is set elsewhere and should *not* be used here.
    # https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.PodMonitorSpec
    spec:
      podMetricsEndpoints:
        - port: metrics

nameOverride: "hetzner-ccm"

networking:
  # If enabled, hcloud-ccm will be deployed with networking support.
  enabled: false
  # If networking is enabled, clusterCIDR must match the PodCIDR subnet your cluster has been configured with.
  # The default "10.244.0.0/16" assumes you're using Flannel with default configuration.
  clusterCIDR: 10.244.0.0/16
  network:
    valueFrom:
      secretKeyRef:
        name: hcloud
        key: network

# Resource requests for the deployed hccm Pod.
resources:
  cpu: ""
  memory: ""

selectorLabels:
  app.kubernetes.io/name: '{{ include "hcloud-cloud-controller-manager.name" $ }}'
  app.kubernetes.io/instance: "{{ $.Release.Name }}"

additionalTolerations: []

# nodeSelector:
#   node-role.kubernetes.io/control-plane: ""
nodeSelector: {}

# Set the affinity for pods. (Only works with kind=Deployment)
affinity: {}

# pods priorityClassName
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption
priorityClassName: "system-cluster-critical"

robot:
  # Set to true to enable support for Robot (Dedicated) servers.
  enabled: false

rbac:
  # Create a cluster role binding with admin access for the service account.
  create: true

podLabels: {}

podAnnotations: {}

# Mounts the specified volume to the hcloud-cloud-controller-manager container.
extraVolumeMounts: []
# # Example
# extraVolumeMounts:
#   - name: token-volume
#     readOnly: true
#     mountPath: /var/run/secrets/hcloud

# Adds extra volumes to the pod.
extraVolumes: []
# # Example
# extraVolumes:
#   - name: token-volume
#     secret:
#       secretName: hcloud-token

packages/system/hetzner-robotlb/Chart.yaml
@@ -0,0 +1,2 @@
name: hetzner-robotlb
version: 0.1.3 # Placeholder, the actual version will be automatically set during the build process

packages/system/hetzner-robotlb/Makefile
@@ -0,0 +1,9 @@
export NAME=hetzner-robotlb
export NAMESPACE=kube-system

include ../../../scripts/package.mk

update:
	rm -rf charts
	mkdir -p charts
	helm pull oci://ghcr.io/intreecom/charts/robotlb --untar --untardir charts

packages/system/hetzner-robotlb/charts/robotlb/.helmignore
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,6 @@
apiVersion: v2
appVersion: 0.0.5
description: A Helm chart for robotlb (loadbalancer on hetzner cloud).
name: robotlb
type: application
version: 0.1.3

@@ -0,0 +1,4 @@
The RobotLB Operator was successfully installed.
Please follow the readme to create load-balanced services.

README: https://github.com/intreecom/robotlb

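As an illustration of what robotlb manages: its RBAC rules (below) grant get/list/patch/update/watch on Services, and the operator reconciles ordinary LoadBalancer Services. A minimal, hypothetical sketch (name and ports are placeholders; any robotlb-specific annotations are documented in the upstream README):

```sh
# Hypothetical Service for robotlb to pick up and back with a Hetzner load balancer.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: my-app
spec:
  type: LoadBalancer
  selector:
    app: my-app
  ports:
    - port: 80
      targetPort: 8080
EOF
```
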
@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "robotlb.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "robotlb.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "robotlb.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "robotlb.labels" -}}
helm.sh/chart: {{ include "robotlb.chart" . }}
{{ include "robotlb.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "robotlb.selectorLabels" -}}
app.kubernetes.io/name: {{ include "robotlb.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "robotlb.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "robotlb.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "robotlb.fullname" . }}
  labels:
    {{- include "robotlb.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicas }}
  selector:
    matchLabels:
      {{- include "robotlb.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "robotlb.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "robotlb.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - /usr/local/bin/robotlb
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          {{- with .Values.envs }}
          env:
            {{- range $key, $val := . }}
            - name: {{ $key | quote }}
              value: {{ $val | quote }}
            {{ end -}}
          {{- end }}
          {{- with .Values.existingSecrets }}
          envFrom:
            {{- range $val := . }}
            - secretRef:
                name: {{ $val | quote }}
            {{ end -}}
          {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

@@ -0,0 +1,21 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "robotlb.fullname" . }}-cr
rules:
  {{- toYaml .Values.serviceAccount.permissions | nindent 2 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "robotlb.fullname" . }}-crb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "robotlb.fullname" . }}-cr
subjects:
  - kind: ServiceAccount
    name: {{ include "robotlb.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "robotlb.serviceAccountName" . }}
  labels:
    {{- include "robotlb.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

packages/system/hetzner-robotlb/charts/robotlb/values.yaml
@@ -0,0 +1,73 @@
# Default values for robotlb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: ghcr.io/intreecom/robotlb
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

envs:
  ROBOTLB_LOG_LEVEL: "INFO"

existingSecrets: []

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
  # This is a list of cluster permissions to apply to the service account.
  # By default it grants all permissions.
  permissions:
    - apiGroups: [""]
      resources: [services, services/status]
      verbs: [get, list, patch, update, watch]
    - apiGroups: [""]
      resources: [nodes, pods]
      verbs: [get, list, watch]

podAnnotations: {}
podLabels: {}

podSecurityContext:
  {}
  # fsGroup: 2000

securityContext:
  {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

resources:
  {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

packages/system/hetzner-robotlb/values.yaml
@@ -0,0 +1,81 @@
image:
  repository: ghcr.io/intreecom/robotlb
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: "hetzner-robotlb"

envs:
  ROBOTLB_LOG_LEVEL: "INFO"

existingSecrets: []

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
  # This is a list of cluster permissions to apply to the service account.
  # By default it grants all permissions.
  permissions:
    - apiGroups: [""]
      resources: [services, services/status]
      verbs: [get, list, patch, update, watch]
    - apiGroups: [""]
      resources: [nodes, pods]
      verbs: [get, list, watch]

podAnnotations: {}
podLabels: {}

# fsGroup: 2000
podSecurityContext:
  {}

# capabilities:
#   drop:
#     - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
securityContext:
  {}

## Number of robotlb replicas
replicas: 1

# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# resources:
#   cpu: 100m
#   memory: 128Mi
resources:
  cpu: ""
  memory: ""

nodeSelector: {}

tolerations: []

affinity: {}

@@ -3,7 +3,7 @@ kamaji:
  deploy: false
  image:
    pullPolicy: IfNotPresent
    tag: v0.33.1@sha256:09fc5c9aeb97880780abfc6d82c216725d6f79e13494bf2399766c882b88f66b
    tag: v0.33.0@sha256:afaf5f003eb990377c21623d17bb00e7a95a1021e1c36b318cb451b80c8d37a2
    repository: ghcr.io/cozystack/cozystack/kamaji
  resources:
    limits:

@@ -1,3 +1,3 @@
portSecurity: true
routes: ""
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.33.1@sha256:595851560856e3ba7f408f259acf84599494984a9f0252de289bcb1a7fc5b9da
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.33.0@sha256:926fa45edd2149e4bc4bb54710832c8fb7aa46c85cf6adb7cd486e0b956cdbfa

@@ -64,4 +64,4 @@ global:
  images:
    kubeovn:
      repository: kubeovn
      tag: v1.13.13@sha256:c0ffc9a0498b6f8fc392f8fc6ea43d0c7eedeeabda8ef96bca004ec4466a6bf2
      tag: v1.13.13@sha256:6315d11876b78f3c24e54a73063d05c63137c4210dcd7620bd983db5fedf469a

@@ -163,7 +163,7 @@ spec:
            privileged: true
            allowPrivilegeEscalation: true
          imagePullPolicy: Always
          image: {{ .Values.csiDriver.image }}
          image: ghcr.io/kvaps/test:kubevirt-csi-driver
          args:
            - "--endpoint=unix:/csi/csi.sock"
            - "--node-name=$(KUBE_NODE_NAME)"

@@ -1,3 +1 @@
storageClass: replicated
csiDriver:
  image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036

@@ -236,15 +236,6 @@ func (o *AppsServerOptions) Config() (*apiserver.Config, error) {
		},
	}

	// make `.spec` schemaless so any keys are accepted
	if specProp, ok := newDef.Properties["spec"]; ok {
		specProp.AdditionalProperties = &spec.SchemaOrBool{
			Allows: true,
			Schema: &spec.Schema{},
		}
		newDef.Properties["spec"] = specProp
	}

	// 3. Save the new resource definition under the correct name
	defs[resourceName] = *newDef
	klog.V(6).Infof("PostProcessSpec: Added OpenAPI definition for %s\n", resourceName)

@@ -76,7 +76,6 @@ type REST struct {
	gvr           schema.GroupVersionResource
	gvk           schema.GroupVersionKind
	kindName      string
	singularName  string
	releaseConfig config.ReleaseConfig
}

@@ -94,7 +93,6 @@ func NewREST(dynamicClient dynamic.Interface, config *config.Resource) *REST {
			Version: "v1alpha1",
		}.WithKind(config.Application.Kind),
		kindName:      config.Application.Kind,
		singularName:  config.Application.Singular,
		releaseConfig: config.Release,
	}
}

@@ -106,7 +104,7 @@ func (r *REST) NamespaceScoped() bool {

// GetSingularName returns the singular name of the resource
func (r *REST) GetSingularName() string {
	return r.singularName
	return r.gvr.Resource
}

// Create handles the creation of a new Application by converting it to a HelmRelease

@@ -425,15 +423,6 @@ func (r *REST) Update(ctx context.Context, name string, objInfo rest.UpdatedObje
		return nil, false, fmt.Errorf("conversion error: %v", err)
	}

	// Ensure ResourceVersion
	if helmRelease.ResourceVersion == "" {
		cur, err := r.dynamicClient.Resource(helmReleaseGVR).Namespace(helmRelease.Namespace).Get(ctx, helmRelease.Name, metav1.GetOptions{})
		if err != nil {
			return nil, false, fmt.Errorf("failed to fetch current HelmRelease: %w", err)
		}
		helmRelease.SetResourceVersion(cur.GetResourceVersion())
	}

	// Merge system labels (from config) directly
	helmRelease.Labels = mergeMaps(r.releaseConfig.Labels, helmRelease.Labels)
	// Merge user labels with prefix