Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-28 18:18:41 +00:00)

Compare commits: v0.33.1...tests-w-re (20 commits)
Commits in range: 47dd7d19f8, f628e7d9c7, 68d1646ae7, 8fde834e39, e99d238647, e9435c2d3d, da3ee5d0ea, 411a465b14, cad57cd922, fe1776b4c8, d9779d55ea, 74d3c89235, c831f53444, 2c68eee9f8, e6ffb4f4e5, e63cc1890e, 1079472a2a, 1609931e3f, 699d38d8b9, f251cba363
@@ -2,6 +2,18 @@
 
 @test "Create DB ClickHouse" {
   name='test'
+  withResources='true'
+  if [ "$withResources" == 'true' ]; then
+  resources=$(cat <<EOF
+  resources:
+    resources:
+      cpu: 500m
+      memory: 768Mi
+EOF
+)
+  else
+  resources='  resources: {}'
+  fi
   kubectl apply -f- <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: ClickHouse
@@ -27,15 +39,13 @@ spec:
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
-  resources: {}
+$resources
   resourcesPreset: "nano"
 EOF
   sleep 5
-  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
-  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
-  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
-  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
-  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
-  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
-  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
+  kubectl -n tenant-test wait --timeout=40s hr clickhouse-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s clickhouses $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=120s sts chi-clickhouse-$name-clickhouse-0-0 --for=jsonpath='{.status.replicas}'=1
+  timeout 210 sh -ec "until kubectl -n tenant-test wait svc chendpoint-clickhouse-$name --for=jsonpath='{.spec.ports[0].port}'=8123; do sleep 10; done"
   kubectl -n tenant-test delete clickhouse.apps.cozystack.io $name
 }
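The `withResources` toggle above swaps an explicit per-replica resource block in and out of the applied manifest. As a sketch (indentation reconstructed from the flattened source; values taken from the test), the spec fragment the heredoc expands to when `withResources='true'` would be:

```yaml
resources:          # app-level block carrying the per-replica settings
  resources:
    cpu: 500m
    memory: 768Mi
resourcesPreset: "nano"   # still present, but ignored once explicit resources are set
```

With `withResources='false'`, the fragment collapses to `resources: {}` and the `nano` preset takes effect.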
@@ -2,6 +2,18 @@
 
 @test "Create Kafka" {
   name='test'
+  withResources='true'
+  if [ "$withResources" == 'true' ]; then
+  resources=$(cat <<EOF
+  resources:
+    resources:
+      cpu: 500m
+      memory: 768Mi
+EOF
+)
+  else
+  resources='resources: {}'
+  fi
   kubectl apply -f- <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: Kafka
@@ -14,13 +26,13 @@ spec:
   size: 10Gi
   replicas: 2
   storageClass: ""
-  resources: {}
+$resources
   resourcesPreset: "nano"
   zookeeper:
     size: 5Gi
     replicas: 2
     storageClass: ""
-    resources:
+$resources
     resourcesPreset: "nano"
   topics:
   - name: testResults
@@ -38,14 +50,9 @@ spec:
     replicas: 2
 EOF
   sleep 5
-  kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
-  kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
-  timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
-  kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
-  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
-  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
-  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  kubectl -n tenant-test wait --timeout=30s hr kafka-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=1m kafkas $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=50s pvc data-kafka-$name-zookeeper-0 --for=jsonpath='{.status.phase}'=Bound
+  kubectl -n tenant-test wait --timeout=40s svc kafka-$name-zookeeper-client --for=jsonpath='{.spec.ports[0].port}'=2181
   kubectl -n tenant-test delete kafka.apps.cozystack.io $name
   kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
   kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
 }
@@ -1,16 +1,17 @@
 #!/usr/bin/env bats
 
 @test "Create a tenant Kubernetes control plane" {
+  name='test'
   kubectl apply -f - <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: Kubernetes
 metadata:
-  name: test
+  name: $name
   namespace: tenant-test
 spec:
   addons:
     certManager:
-      enabled: false
+      enabled: true
       valuesOverride: {}
     cilium:
       valuesOverride: {}
@@ -24,10 +25,12 @@ spec:
       valuesOverride: {}
     ingressNginx:
       enabled: true
-      hosts: []
+      hosts:
+      - example.org
+      exposeMethod: Proxied
       valuesOverride: {}
     monitoringAgents:
-      enabled: false
+      enabled: true
       valuesOverride: {}
     verticalPodAutoscaler:
       valuesOverride: {}
@@ -61,12 +64,39 @@ spec:
   - ingress-nginx
   storageClass: replicated
 EOF
-  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
-  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
-  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
-  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
-  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
-  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
-  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
-  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
+  sleep 10
+  kubectl wait --timeout=20s namespace tenant-test --for=jsonpath='{.status.phase}'=Active
+  kubectl -n tenant-test wait --timeout=10s kamajicontrolplane kubernetes-$name --for=jsonpath='{.status.conditions[0].status}'=True
+  kubectl -n tenant-test wait --timeout=4m kamajicontrolplane kubernetes-$name --for=condition=TenantControlPlaneCreated
+  kubectl -n tenant-test wait --timeout=210s tcp kubernetes-$name --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
+  kubectl -n tenant-test wait --timeout=4m deploy kubernetes-$name kubernetes-$name-cluster-autoscaler kubernetes-$name-kccm kubernetes-$name-kcsi-controller --for=condition=available
+  kubectl -n tenant-test wait --timeout=1m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.replicas}'=2
+  kubectl -n tenant-test wait --timeout=10m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
+  # ingress / load balancer
+  kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-monitoring-agents --for=condition=ready
+  kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-ingress-nginx --for=condition=ready
+  kubectl -n tenant-test get secret kubernetes-$name-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "admin.conf" | base64decode) }}' > admin.conf
+  KUBECONFIG=admin.conf kubectl -n cozy-ingress-nginx wait --timeout=3m deploy ingress-nginx-defaultbackend --for=jsonpath='{.status.conditions[0].status}'=True
+  KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=3m deploy cozy-monitoring-agents-metrics-server --for=jsonpath='{.status.conditions[0].status}'=True
 }
+
+@test "Create a PVC in tenant Kubernetes" {
+  name='test'
+  KUBECONFIG=admin.conf kubectl apply -f - <<EOF
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-$name
+  namespace: cozy-monitoring
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+EOF
+  sleep 10
+  KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=20s pvc pvc-$name --for=jsonpath='{.status.phase}'=Bound
+  KUBECONFIG=admin.conf kubectl -n cozy-monitoring delete pvc pvc-$name
+  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $name
+}
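Assembled from the hunks above, the addon configuration this updated test now exercises amounts to the following fragment (a sketch, not the full manifest):

```yaml
addons:
  certManager:
    enabled: true
  ingressNginx:
    enabled: true
    hosts:
    - example.org
    exposeMethod: Proxied
  monitoringAgents:
    enabled: true
```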
@@ -2,6 +2,18 @@
 
 @test "Create DB MySQL" {
   name='test'
+  withResources='true'
+  if [ "$withResources" == 'true' ]; then
+  resources=$(cat <<EOF
+  resources:
+    resources:
+      cpu: 3000m
+      memory: 3Gi
+EOF
+)
+  else
+  resources='  resources: {}'
+  fi
   kubectl apply -f- <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: MySQL
@@ -31,16 +43,15 @@ spec:
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
-  resources: {}
+$resources
   resourcesPreset: "nano"
 EOF
   sleep 5
-  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
-  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
-  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
-  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
-  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
-  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
-  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
-  sleep 10
+  kubectl -n tenant-test wait --timeout=30s hr mysql-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s mysqls $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=110s sts mysql-$name --for=jsonpath='{.status.replicas}'=2
+  sleep 60
+  kubectl -n tenant-test wait --timeout=60s deploy mysql-$name-metrics --for=jsonpath='{.status.replicas}'=1
+  kubectl -n tenant-test wait --timeout=100s svc mysql-$name --for=jsonpath='{.spec.ports[0].port}'=3306
   kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
 }
@@ -2,6 +2,18 @@
 
 @test "Create DB PostgreSQL" {
   name='test'
+  withResources='true'
+  if [ "$withResources" == 'true' ]; then
+  resources=$(cat <<EOF
+  resources:
+    resources:
+      cpu: 500m
+      memory: 768Mi
+EOF
+)
+  else
+  resources='  resources: {}'
+  fi
   kubectl apply -f - <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: Postgres
@@ -36,19 +48,14 @@ spec:
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
-  resources: {}
+$resources
   resourcesPreset: "nano"
 EOF
   sleep 5
-  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
-  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
-  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
-  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
-  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
-  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
-  # for some reason it takes longer for the read-only endpoint to be ready
-  #timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
-  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  kubectl -n tenant-test wait --timeout=200s hr postgres-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s postgreses $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=50s job.batch postgres-$name-init-job --for=condition=Complete
+  kubectl -n tenant-test wait --timeout=40s svc postgres-$name-r --for=jsonpath='{.spec.ports[0].port}'=5432
   kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
   kubectl -n tenant-test delete job.batch/postgres-$name-init-job
 }
@@ -2,6 +2,18 @@
 
 @test "Create Redis" {
   name='test'
+  withResources='true'
+  if [ "$withResources" == 'true' ]; then
+  resources=$(cat <<EOF
+  resources:
+    resources:
+      cpu: 500m
+      memory: 768Mi
+EOF
+)
+  else
+  resources='resources: {}'
+  fi
   kubectl apply -f- <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: Redis
@@ -14,13 +26,15 @@ spec:
   replicas: 2
   storageClass: ""
   authEnabled: true
-  resources: {}
+$resources
   resourcesPreset: "nano"
 EOF
   sleep 5
-  kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
-  kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
-  kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
-  kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
+  kubectl -n tenant-test wait --timeout=20s hr redis-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s redis.apps.cozystack.io $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=50s pvc redisfailover-persistent-data-rfr-redis-$name-0 --for=jsonpath='{.status.phase}'=Bound
+  kubectl -n tenant-test wait --timeout=90s sts rfr-redis-$name --for=jsonpath='{.status.replicas}'=2
+  sleep 45
+  kubectl -n tenant-test wait --timeout=45s deploy rfs-redis-$name --for=condition=available
   kubectl -n tenant-test delete redis.apps.cozystack.io $name
 }
@@ -2,6 +2,14 @@
 
 @test "Create a Virtual Machine" {
   name='test'
+  withResources='true'
+  if [ "$withResources" == 'true' ]; then
+  cores="1000m"
+  memory="1Gi"
+  else
+  cores="2000m"
+  memory="2Gi"
+  fi
   kubectl apply -f - <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: VirtualMachine
@@ -9,6 +17,12 @@ metadata:
   name: $name
   namespace: tenant-test
 spec:
+  domain:
+    cpu:
+      cores: "$cores"
+    resources:
+      requests:
+        memory: "$memory"
   external: false
   externalMethod: PortList
   externalPorts:
@@ -20,9 +34,6 @@ spec:
     storage: 5Gi
     storageClass: replicated
   gpus: []
-  resources:
-    cpu: ""
-    memory: ""
   sshKeys:
   - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
@@ -37,11 +48,12 @@ spec:
   - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
   cloudInitSeed: ""
 EOF
   sleep 5
-  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
-  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
-  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
-  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
-  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
+  sleep 10
+  kubectl -n tenant-test wait --timeout=10s hr virtual-machine-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s virtualmachines $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s pvc virtual-machine-$name --for=jsonpath='{.status.phase}'=Bound
+  kubectl -n tenant-test wait --timeout=150s dv virtual-machine-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=100s vm virtual-machine-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=150s vmi virtual-machine-$name --for=jsonpath='{status.phase}'=Running
   kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
 }
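For reference, the `domain` block added above renders to the following fragment when `withResources='true'` (values from the test; quoting as in the heredoc):

```yaml
domain:
  cpu:
    cores: "1000m"
  resources:
    requests:
      memory: "1Gi"
```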
@@ -17,21 +17,37 @@ spec:
   storageClass: replicated
 EOF
   sleep 5
-  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
-  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
-  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
+  kubectl -n tenant-test wait --timeout=5s hr vm-disk-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s vmdisks $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s pvc vm-disk-$name --for=jsonpath='{.status.phase}'=Bound
+  kubectl -n tenant-test wait --timeout=150s dv vm-disk-$name --for=condition=ready
 }
 
 @test "Create a VM Instance" {
   diskName='test'
   name='test'
-  kubectl apply -f - <<EOF
+  withResources='true'
+  if [ "$withResources" == 'true' ]; then
+  cores="1000m"
+  memory="1Gi"
+  else
+  cores="2000m"
+  memory="2Gi"
+  fi
+  kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
+  kubectl create -f - <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: VMInstance
 metadata:
   name: $name
   namespace: tenant-test
 spec:
+  domain:
+    cpu:
+      cores: "$cores"
+    resources:
+      requests:
+        memory: "$memory"
   external: false
   externalMethod: PortList
   externalPorts:
@@ -42,9 +58,6 @@ spec:
   disks:
   - name: $diskName
   gpus: []
-  resources:
-    cpu: ""
-    memory: ""
   sshKeys:
   - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
@@ -60,9 +73,10 @@ spec:
   cloudInitSeed: ""
 EOF
   sleep 5
-  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
-  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
-  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
+  kubectl -n tenant-test wait --timeout=5s hr vm-instance-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=130s vminstances $name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=20s vm vm-instance-$name --for=condition=ready
+  kubectl -n tenant-test wait --timeout=40s vmi vm-instance-$name --for=jsonpath='{status.phase}'=Running
   kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
   kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
 }
@@ -75,6 +75,6 @@ This setting is ignored if the corresponding `resources` value is set.
 | `micro`   | `500m` | `256Mi` |
 | `small`   | `1`    | `512Mi` |
 | `medium`  | `1`    | `1Gi`   |
-| `large`   | `3`    | `2Gi`   |
+| `large`   | `2`    | `2Gi`   |
 | `xlarge`  | `4`    | `4Gi`   |
 | `2xlarge` | `8`    | `8Gi`   |

The same one-line fix to the `large` row (`3` → `2` CPUs) is applied across the preset tables of the other application READMEs in this change set.
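The row fix matters because the preset is only a fallback. A minimal illustration of the rule stated in the hunk header above (hypothetical values):

```yaml
resourcesPreset: "large"   # alone, this now corresponds to 2 CPUs / 2Gi
resources:
  cpu: 500m                # once any explicit value is set here,
  memory: 768Mi            # the preset is ignored entirely
```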
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.25.1
+version: 0.25.2
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -40,6 +40,7 @@ spec:
             {{ .Release.Name }}-fluxcd-operator
             {{ .Release.Name }}-fluxcd
             {{ .Release.Name }}-gpu-operator
+            {{ .Release.Name }}-velero
             -p '{"spec": {"suspend": true}}'
             --type=merge --field-manager=flux-client-side-apply || true
 ---
@@ -79,6 +80,8 @@ rules:
   - {{ .Release.Name }}-fluxcd-operator
   - {{ .Release.Name }}-fluxcd
   - {{ .Release.Name }}-gpu-operator
+  - {{ .Release.Name }}-velero
+
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
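Each HelmRelease named in the list above (now including `{{ .Release.Name }}-velero`) receives the same merge patch; its effect on the object is simply:

```yaml
spec:
  suspend: true   # pauses Flux reconciliation for the release
```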
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.17.0
+version: 0.17.1
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -11,7 +11,50 @@ This managed service is controlled by the CloudNativePG operator, ensuring efficient management and seamless operation.
 - Docs: <https://cloudnative-pg.io/docs/>
 - Github: <https://github.com/cloudnative-pg/cloudnative-pg>
 
-## HowTos
+## Operations
+
+### How to enable backups
+
+To back up a PostgreSQL application, an external S3-compatible storage is required.
+
+To start regular backups, update the application, setting `backup.enabled` to `true`, and fill in the path and credentials in `backup.*`:
+
+```yaml
+## @param backup.enabled Enable regular backups
+## @param backup.schedule Cron schedule for automated backups
+## @param backup.retentionPolicy Retention policy
+## @param backup.destinationPath Path to store the backup (i.e. s3://bucket/path/to/folder)
+## @param backup.endpointURL S3 Endpoint used to upload data to the cloud
+## @param backup.s3AccessKey Access key for S3, used for authentication
+## @param backup.s3SecretKey Secret key for S3, used for authentication
+backup:
+  enabled: false
+  retentionPolicy: 30d
+  destinationPath: s3://bucket/path/to/folder/
+  endpointURL: http://minio-gateway-service:9000
+  schedule: "0 2 * * * *"
+  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
+  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
+```
+
+### How to recover a backup
+
+CloudNativePG supports point-in-time recovery.
+Recovering a backup is done by creating a new database instance and restoring the data into it.
+
+Create a new PostgreSQL application with a different name but identical configuration.
+Set `bootstrap.enabled` to `true` and fill in the name of the database instance to recover from and the recovery time:
+
+```yaml
+## @param bootstrap.enabled Restore database cluster from a backup
+## @param bootstrap.recoveryTime Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest
+## @param bootstrap.oldName Name of database cluster before deleting
+##
+bootstrap:
+  enabled: false
+  recoveryTime: "" # leave empty for latest or exact timestamp; example: 2020-11-26 15:22:00.00000+00
+  oldName: "<previous-postgres-instance>"
+```
+
 ### How to switch primary/secondary replica
@@ -19,24 +62,6 @@ See:
 
 - <https://cloudnative-pg.io/documentation/1.15/rolling_update/#manual-updates-supervised>
 
-### How to restore backup
-
-find snapshot:
-
-```bash
-restic -r s3:s3.example.org/postgres-backups/database_name snapshots
-```
-
-restore:
-
-```bash
-restic -r s3:s3.example.org/postgres-backups/database_name restore latest --target /tmp/
-```
-
-more details:
-
-- <https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1>
-
 ## Parameters
 
 ### Common parameters
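For illustration only — a hypothetical filled-in bootstrap block for the recovery flow documented above (the instance name is invented):

```yaml
bootstrap:
  enabled: true
  recoveryTime: "2020-11-26 15:22:00.00000+00"  # example timestamp from the docs; "" restores the latest
  oldName: "postgres-old"                        # hypothetical name of the instance being replaced
```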
@@ -60,23 +85,23 @@ more details:
 
 ### Backup parameters
 
-| Name                     | Description                                                           | Value                               |
-| ------------------------ | --------------------------------------------------------------------- | ----------------------------------- |
-| `backup.enabled`         | Enable pereiodic backups                                              | `false`                             |
-| `backup.schedule`        | Cron schedule for automated backups                                   | `0 2 * * * *`                       |
-| `backup.retentionPolicy` | The retention policy                                                  | `30d`                               |
-| `backup.destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) | `s3://BUCKET_NAME/`                 |
-| `backup.endpointURL`     | Endpoint to be used to upload data to the cloud                       | `http://minio-gateway-service:9000` |
-| `backup.s3AccessKey`     | The access key for S3, used for authentication                        | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`  |
-| `backup.s3SecretKey`     | The secret key for S3, used for authentication                        | `ju3eum4dekeich9ahM1te8waeGai0oog`  |
+| Name                     | Description                                                | Value                               |
+| ------------------------ | ---------------------------------------------------------- | ----------------------------------- |
+| `backup.enabled`         | Enable regular backups                                     | `false`                             |
+| `backup.schedule`        | Cron schedule for automated backups                        | `0 2 * * * *`                       |
+| `backup.retentionPolicy` | Retention policy                                           | `30d`                               |
+| `backup.destinationPath` | Path to store the backup (i.e. s3://bucket/path/to/folder) | `s3://bucket/path/to/folder/`       |
+| `backup.endpointURL`     | S3 Endpoint used to upload data to the cloud               | `http://minio-gateway-service:9000` |
+| `backup.s3AccessKey`     | Access key for S3, used for authentication                 | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`  |
+| `backup.s3SecretKey`     | Secret key for S3, used for authentication                 | `ju3eum4dekeich9ahM1te8waeGai0oog`  |
 
 ### Bootstrap parameters
 
 | Name                     | Description                                                                                                                              | Value   |
 | ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------- | ------- |
-| `bootstrap.enabled`      | Restore cluster from backup                                                                                                              | `false` |
-| `bootstrap.recoveryTime` | Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest                                | `""`    |
-| `bootstrap.oldName`      | Name of cluster before deleting                                                                                                          | `""`    |
+| `bootstrap.enabled`      | Restore database cluster from a backup                                                                                                   | `false` |
+| `bootstrap.recoveryTime` | Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest                     | `""`    |
+| `bootstrap.oldName`      | Name of database cluster before deleting                                                                                                 | `""`    |
 | `resources`              | Explicit CPU and memory configuration for each PostgreSQL replica. When left empty, the preset defined in `resourcesPreset` is applied.  | `{}`    |
 | `resourcesPreset`        | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.        | `micro` |
@@ -62,7 +62,7 @@
     "properties": {
       "enabled": {
         "type": "boolean",
-        "description": "Enable pereiodic backups",
+        "description": "Enable regular backups",
         "default": false
       },
       "schedule": {
@@ -72,27 +72,27 @@
       },
       "retentionPolicy": {
         "type": "string",
-        "description": "The retention policy",
+        "description": "Retention policy",
         "default": "30d"
       },
       "destinationPath": {
         "type": "string",
-        "description": "The path where to store the backup (i.e. s3://bucket/path/to/folder)",
-        "default": "s3://BUCKET_NAME/"
+        "description": "Path to store the backup (i.e. s3://bucket/path/to/folder)",
+        "default": "s3://bucket/path/to/folder/"
       },
       "endpointURL": {
         "type": "string",
-        "description": "Endpoint to be used to upload data to the cloud",
+        "description": "S3 Endpoint used to upload data to the cloud",
         "default": "http://minio-gateway-service:9000"
       },
       "s3AccessKey": {
         "type": "string",
-        "description": "The access key for S3, used for authentication",
+        "description": "Access key for S3, used for authentication",
         "default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
       },
       "s3SecretKey": {
         "type": "string",
-        "description": "The secret key for S3, used for authentication",
+        "description": "Secret key for S3, used for authentication",
         "default": "ju3eum4dekeich9ahM1te8waeGai0oog"
       }
     }
@@ -102,17 +102,17 @@
     "properties": {
       "enabled": {
         "type": "boolean",
-        "description": "Restore cluster from backup",
+        "description": "Restore database cluster from a backup",
         "default": false
       },
       "recoveryTime": {
         "type": "string",
-        "description": "Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest",
+        "description": "Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest",
         "default": ""
       },
       "oldName": {
         "type": "string",
-        "description": "Name of cluster before deleting",
+        "description": "Name of database cluster before deleting",
         "default": ""
       }
     }
@@ -59,17 +59,17 @@ databases: {}
 
 ## @section Backup parameters
 
-## @param backup.enabled Enable pereiodic backups
+## @param backup.enabled Enable regular backups
 ## @param backup.schedule Cron schedule for automated backups
-## @param backup.retentionPolicy The retention policy
-## @param backup.destinationPath The path where to store the backup (i.e. s3://bucket/path/to/folder)
-## @param backup.endpointURL Endpoint to be used to upload data to the cloud
-## @param backup.s3AccessKey The access key for S3, used for authentication
-## @param backup.s3SecretKey The secret key for S3, used for authentication
+## @param backup.retentionPolicy Retention policy
+## @param backup.destinationPath Path to store the backup (i.e. s3://bucket/path/to/folder)
+## @param backup.endpointURL S3 Endpoint used to upload data to the cloud
+## @param backup.s3AccessKey Access key for S3, used for authentication
+## @param backup.s3SecretKey Secret key for S3, used for authentication
 backup:
   enabled: false
   retentionPolicy: 30d
-  destinationPath: s3://BUCKET_NAME/
+  destinationPath: s3://bucket/path/to/folder/
   endpointURL: http://minio-gateway-service:9000
   schedule: "0 2 * * * *"
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
@@ -77,9 +77,9 @@ backup:
 
 ## @section Bootstrap parameters
 
-## @param bootstrap.enabled Restore cluster from backup
-## @param bootstrap.recoveryTime Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest
-## @param bootstrap.oldName Name of cluster before deleting
+## @param bootstrap.enabled Restore database cluster from a backup
+## @param bootstrap.recoveryTime Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest
+## @param bootstrap.oldName Name of database cluster before deleting
 ##
 bootstrap:
   enabled: false
@@ -55,7 +55,8 @@ kafka 0.7.1 4369b031
 kafka 0.8.0 HEAD
 kubernetes 0.24.0 62cb694d
 kubernetes 0.25.0 70f82667
-kubernetes 0.25.1 HEAD
+kubernetes 0.25.1 acd4663a
+kubernetes 0.25.2 HEAD
 mysql 0.1.0 263e47be
 mysql 0.2.0 c24a103f
 mysql 0.3.0 53f2365e
@@ -103,7 +104,8 @@ postgres 0.12.1 632224a3
 postgres 0.14.0 62cb694d
 postgres 0.15.1 4369b031
 postgres 0.16.0 70f82667
-postgres 0.17.0 HEAD
+postgres 0.17.0 acd4663a
+postgres 0.17.1 HEAD
 rabbitmq 0.1.0 263e47be
 rabbitmq 0.2.0 53f2365e
 rabbitmq 0.3.0 6c5cf5bf
@@ -155,7 +157,8 @@ virtual-machine 0.9.1 93bdf411
 virtual-machine 0.10.0 6130f43d
 virtual-machine 0.10.2 632224a3
 virtual-machine 0.11.0 4369b031
-virtual-machine 0.12.0 HEAD
+virtual-machine 0.12.0 70f82667
+virtual-machine 0.12.1 HEAD
 vm-disk 0.1.0 d971f2ff
 vm-disk 0.1.1 6130f43d
 vm-disk 0.1.2 632224a3
@@ -172,7 +175,8 @@ vm-instance 0.6.0 721c12a7
 vm-instance 0.7.0 6130f43d
 vm-instance 0.7.2 632224a3
 vm-instance 0.8.0 4369b031
-vm-instance 0.9.0 HEAD
+vm-instance 0.9.0 70f82667
+vm-instance 0.10.0 HEAD
 vpn 0.1.0 263e47be
 vpn 0.2.0 53f2365e
 vpn 0.3.0 6c5cf5bf
@@ -17,7 +17,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.12.0
+version: 0.12.1
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -3,6 +3,13 @@ kind: Role
 metadata:
   name: {{ .Release.Name }}-dashboard-resources
 rules:
+- apiGroups:
+  - ""
+  resources:
+  - services
+  resourceNames:
+  - {{ include "virtual-machine.fullname" . }}
+  verbs: ["get", "list", "watch"]
 - apiGroups:
   - cozystack.io
   resources:
@@ -9,7 +9,7 @@ stringData:
   key{{ $k }}: {{ quote $v }}
 {{- end }}
 {{- end }}
-{{- if .Values.cloudInit }}
+{{- if or .Values.cloudInit .Values.sshKeys }}
 ---
 apiVersion: v1
 kind: Secret
@@ -17,5 +17,17 @@ metadata:
   name: {{ include "virtual-machine.fullname" . }}-cloud-init
 stringData:
   userdata: |
-    {{- .Values.cloudInit | nindent 4 }}
+    {{- if .Values.cloudInit }}
+    {{- .Values.cloudInit | nindent 4 }}
+    {{- else if and (.Values.sshKeys) (not .Values.cloudInit) }}
+    {{- /*
+    We usually provide ssh keys in cloud-init metadata, because userdata is not typed and can be used for any purpose.
+    However, if the user provides ssh keys but not cloud-init, we still need to provide a minimal cloud-init config to avoid errors.
+    */}}
+    #cloud-config
+    ssh_authorized_keys:
+    {{- range .Values.sshKeys }}
+    - {{ quote . }}
+    {{- end }}
+    {{- end }}
 {{- end }}
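When only `sshKeys` is set, the template above now renders a minimal cloud-config into the Secret's `userdata`. A sketch of the rendered payload (the key is the one used in the tests):

```yaml
#cloud-config
ssh_authorized_keys:
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test"
```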
@@ -92,7 +92,7 @@ spec:
           - disk:
               bus: scsi
             name: systemdisk
-          {{- if .Values.sshKeys }}
+          {{- if or .Values.cloudInit .Values.sshKeys }}
           - disk:
               bus: virtio
             name: cloudinitdisk
@@ -122,28 +122,11 @@ spec:
       - name: systemdisk
         dataVolume:
           name: {{ include "virtual-machine.fullname" . }}
 
-      {{- if and .Values.sshKeys .Values.cloudInit }}
+      {{- if or .Values.cloudInit .Values.sshKeys }}
       - name: cloudinitdisk
         cloudInitNoCloud:
           secretRef:
             name: {{ include "virtual-machine.fullname" . }}-cloud-init
-      {{- else if .Values.sshKeys }}
-      - name: cloudinitdisk
-        cloudInitNoCloud:
-          userData: |
-            {{ printf "%s" "#cloud-config" }}
-            ssh_authorized_keys:
-            {{- range .Values.sshKeys }}
-            - {{ . }}
-            {{- end }}
-            chpasswd:
-              expire: false
-      {{- else }}
-      - name: cloudinitdisk
-        cloudInitNoCloud:
-          userData: |
-            {{ printf "%s" "#cloud-config" }}
       {{- end }}
 
       networks:
@@ -17,10 +17,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.0
+version: 0.10.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: 0.8.0
+appVersion: 0.10.0
The Role and cloud-init Secret template changes shown earlier are applied identically in this chart as well.
@@ -54,24 +54,24 @@ spec:
       disks:
         {{- range $i, $disk := .Values.disks }}
         - name: disk-{{ $disk.name }}
-          {{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
-          {{- if $disk }}
-          {{- if and (hasKey $disk.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $disk.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
-          cdrom: {}
+          {{- $dv := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
+          {{- if $dv }}
+          {{- if and (hasKey $dv.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $dv.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
+          cdrom:
           {{- else }}
-          disk: {}
-          {{- end }}
-          {{- if eq $i 0 }}
-          bootOrder: 1
+          disk:
+          {{- end }}
+          {{- with $disk.bus }}
+            bus: {{ . }}
           {{- end }}
+          bootOrder: {{ add $i 1 }}
           {{- else }}
           {{- fail (printf "Specified disk not exists in cluster: %s" .name) }}
           {{- end }}
           {{- end }}
-        {{- if or .Values.sshKeys .Values.cloudInit }}
+        {{- if or .Values.cloudInit .Values.sshKeys }}
         - name: cloudinitdisk
-          disk:
-            bus: virtio
+          disk: {}
         {{- end }}
         interfaces:
         - name: default
@@ -95,27 +95,11 @@ spec:
         dataVolume:
           name: vm-disk-{{ .name }}
       {{- end }}
-      {{- if and .Values.sshKeys .Values.cloudInit }}
+      {{- if or .Values.cloudInit .Values.sshKeys }}
       - name: cloudinitdisk
         cloudInitNoCloud:
           secretRef:
             name: {{ include "virtual-machine.fullname" . }}-cloud-init
-      {{- else if .Values.sshKeys }}
-      - name: cloudinitdisk
-        cloudInitNoCloud:
-          userData: |
-            {{ printf "%s" "#cloud-config" }}
-            ssh_authorized_keys:
-            {{- range .Values.sshKeys }}
-            - {{ . }}
-            {{- end }}
-            chpasswd:
-              expire: false
-      {{- else }}
-      - name: cloudinitdisk
-        cloudInitNoCloud:
-          userData: |
-            {{ printf "%s" "#cloud-config" }}
       {{- end }}
       networks:
       - name: default
@@ -22,6 +22,7 @@ instanceProfile: ubuntu
 ## disks:
 ## - name: example-system
 ## - name: example-data
+##   bus: sata
 disks: []
 
 ## @param gpus [array] List of GPUs to attach
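A hypothetical `disks` value using the per-disk `bus` option documented above (names are the placeholders from the comment):

```yaml
disks:
- name: example-system    # first disk gets bootOrder 1
- name: example-data
  bus: sata               # optional; rendered via the {{- with $disk.bus }} block
```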
@@ -1,7 +1,7 @@
 FROM bitnami/node:20.15.1 AS build
 WORKDIR /app
 
-ARG COMMIT_REF=6856b66f9244ef1b2703a2f30899366e0ba040de
+ARG COMMIT_REF=e1382f51c6db1bca0a8ecd454407c8e282fe0243
 RUN wget -O- https://github.com/cozystack/kubeapps/archive/${COMMIT_REF}.tar.gz | tar xzf - --strip-components=2 kubeapps-${COMMIT_REF}/dashboard
 
 RUN yarn install --frozen-lockfile
@@ -4,7 +4,7 @@
 # syntax = docker/dockerfile:1
 
 FROM alpine AS source
-ARG COMMIT_REF=6856b66f9244ef1b2703a2f30899366e0ba040de
+ARG COMMIT_REF=e1382f51c6db1bca0a8ecd454407c8e282fe0243
 RUN apk add --no-cache patch
 WORKDIR /source
 RUN wget -O- https://github.com/cozystack/kubeapps/archive/${COMMIT_REF}.tar.gz | tar xzf - --strip-components=1
@@ -236,6 +236,15 @@ func (o *AppsServerOptions) Config() (*apiserver.Config, error) {
 		},
 	}
 
+	// make `.spec` schemaless so any keys are accepted
+	if specProp, ok := newDef.Properties["spec"]; ok {
+		specProp.AdditionalProperties = &spec.SchemaOrBool{
+			Allows: true,
+			Schema: &spec.Schema{},
+		}
+		newDef.Properties["spec"] = specProp
+	}
+
 	// 3. Save the new resource definition under the correct name
 	defs[resourceName] = *newDef
 	klog.V(6).Infof("PostProcessSpec: Added OpenAPI definition for %s\n", resourceName)
@@ -76,6 +76,7 @@ type REST struct {
 	gvr           schema.GroupVersionResource
 	gvk           schema.GroupVersionKind
 	kindName      string
+	singularName  string
 	releaseConfig config.ReleaseConfig
 }
@@ -93,6 +94,7 @@ func NewREST(dynamicClient dynamic.Interface, config *config.Resource) *REST {
 			Version: "v1alpha1",
 		}.WithKind(config.Application.Kind),
 		kindName:      config.Application.Kind,
+		singularName:  config.Application.Singular,
 		releaseConfig: config.Release,
 	}
 }
@@ -104,7 +106,7 @@ func (r *REST) NamespaceScoped() bool {
 
 // GetSingularName returns the singular name of the resource
 func (r *REST) GetSingularName() string {
-	return r.gvr.Resource
+	return r.singularName
 }
 
 // Create handles the creation of a new Application by converting it to a HelmRelease
@@ -423,6 +425,15 @@ func (r *REST) Update(ctx context.Context, name string, objInfo rest.UpdatedObje
 		return nil, false, fmt.Errorf("conversion error: %v", err)
 	}
 
+	// Ensure ResourceVersion
+	if helmRelease.ResourceVersion == "" {
+		cur, err := r.dynamicClient.Resource(helmReleaseGVR).Namespace(helmRelease.Namespace).Get(ctx, helmRelease.Name, metav1.GetOptions{})
+		if err != nil {
+			return nil, false, fmt.Errorf("failed to fetch current HelmRelease: %w", err)
+		}
+		helmRelease.SetResourceVersion(cur.GetResourceVersion())
+	}
+
 	// Merge system labels (from config) directly
 	helmRelease.Labels = mergeMaps(r.releaseConfig.Labels, helmRelease.Labels)
 	// Merge user labels with prefix