Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-28 18:18:41 +00:00)

Compare commits: tests-w-re...experiment (1 commit)
Commit: 07f82cb5dd
Makefile (3 changed lines)
@@ -57,3 +57,6 @@ generate:

upload_assets: manifests
	hack/upload-assets.sh

controller-gen:
	controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./internal/controller/" paths="./api/v1alpha1/"
api/v1alpha1/cozystack_types.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +kubebuilder:object:root=true

// Cozystack is the Schema for the Cozystack API
type Cozystack struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	//Status CozystackStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// CozystackList contains a list of Cozystack
type CozystackList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Cozystack `json:"items"`
}

func init() {
	SchemeBuilder.Register(&Cozystack{}, &CozystackList{})
}
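The `SchemeBuilder` referenced in `init()` is not defined in this hunk; in a standard kubebuilder layout it comes from a sibling `groupversion_info.go`. A minimal sketch of what that companion file typically looks like — the group name `cozystack.io` is an assumption, since this diff does not show it:

```go
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is the group/version used to register these objects.
	// The group name here is assumed; it does not appear in this diff.
	GroupVersion = schema.GroupVersion{Group: "cozystack.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add Go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
```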
@@ -25,6 +25,63 @@ import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Cozystack) DeepCopyInto(out *Cozystack) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cozystack.
func (in *Cozystack) DeepCopy() *Cozystack {
	if in == nil {
		return nil
	}
	out := new(Cozystack)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Cozystack) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackList) DeepCopyInto(out *CozystackList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Cozystack, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackList.
func (in *CozystackList) DeepCopy() *CozystackList {
	if in == nil {
		return nil
	}
	out := new(CozystackList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CozystackList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Selector) DeepCopyInto(out *Selector) {
	{
@@ -206,6 +206,14 @@ func main() {
		os.Exit(1)
	}

	if err = (&controller.CozystackReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
		os.Exit(1)
	}

	// +kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
@@ -2,18 +2,6 @@

@test "Create DB ClickHouse" {
  name='test'
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    resources=$(cat <<EOF
  resources:
    resources:
      cpu: 500m
      memory: 768Mi
EOF
)
  else
    resources=' resources: {}'
  fi
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
@@ -39,13 +27,15 @@ spec:
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
$resources
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait --timeout=40s hr clickhouse-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s clickhouses $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=120s sts chi-clickhouse-$name-clickhouse-0-0 --for=jsonpath='{.status.replicas}'=1
  timeout 210 sh -ec "until kubectl -n tenant-test wait svc chendpoint-clickhouse-$name --for=jsonpath='{.spec.ports[0].port}'=8123; do sleep 10; done"
  kubectl -n tenant-test delete clickhouse.apps.cozystack.io $name
  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
}
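The test above mixes `kubectl wait` (for states the API server reports as conditions or jsonpaths) with a `timeout … until …` shell loop (for states `kubectl wait` cannot express, such as grep-matching a jsonpath output). A generic sketch of that polling idiom — `wait_for` is a hypothetical helper name, and the service/port are placeholders; the tests inline this pattern instead:

```bash
#!/usr/bin/env bash
# Usage: wait_for <seconds> '<shell command that exits 0 once ready>'
wait_for() {
  # Re-run the probe every 10s until it succeeds or the deadline expires.
  timeout "$1" sh -ec "until $2; do sleep 10; done"
}

# Example: wait up to 80s until a Service (placeholder name) exposes port 8123.
wait_for 80 "kubectl -n tenant-test get svc my-svc -o jsonpath='{.spec.ports[0].port}' | grep -q '8123'"
```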
@@ -2,18 +2,6 @@

@test "Create Kafka" {
  name='test'
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    resources=$(cat <<EOF
  resources:
    resources:
      cpu: 500m
      memory: 768Mi
EOF
)
  else
    resources='resources: {}'
  fi
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kafka
@@ -26,13 +14,13 @@ spec:
    size: 10Gi
    replicas: 2
    storageClass: ""
$resources
    resources: {}
    resourcesPreset: "nano"
  zookeeper:
    size: 5Gi
    replicas: 2
    storageClass: ""
$resources
    resources:
    resourcesPreset: "nano"
  topics:
    - name: testResults
@@ -50,9 +38,14 @@ spec:
      replicas: 2
EOF
  sleep 5
  kubectl -n tenant-test wait --timeout=30s hr kafka-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=1m kafkas $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=50s pvc data-kafka-$name-zookeeper-0 --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait --timeout=40s svc kafka-$name-zookeeper-client --for=jsonpath='{.spec.ports[0].port}'=2181
  kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
  kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
  timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
  kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete kafka.apps.cozystack.io $name
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
}
@@ -1,17 +1,16 @@
#!/usr/bin/env bats

@test "Create a tenant Kubernetes control plane" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: $name
  name: test
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: true
      enabled: false
      valuesOverride: {}
    cilium:
      valuesOverride: {}
@@ -25,12 +24,10 @@ spec:
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts:
        - example.org
      exposeMethod: Proxied
      hosts: []
      valuesOverride: {}
    monitoringAgents:
      enabled: true
      enabled: false
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
@@ -64,39 +61,12 @@ spec:
    - ingress-nginx
  storageClass: replicated
EOF
  sleep 10
  kubectl wait --timeout=20s namespace tenant-test --for=jsonpath='{.status.phase}'=Active
  kubectl -n tenant-test wait --timeout=10s kamajicontrolplane kubernetes-$name --for=jsonpath='{.status.conditions[0].status}'=True
  kubectl -n tenant-test wait --timeout=4m kamajicontrolplane kubernetes-$name --for=condition=TenantControlPlaneCreated
  kubectl -n tenant-test wait --timeout=210s tcp kubernetes-$name --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl -n tenant-test wait --timeout=4m deploy kubernetes-$name kubernetes-$name-cluster-autoscaler kubernetes-$name-kccm kubernetes-$name-kcsi-controller --for=condition=available
  kubectl -n tenant-test wait --timeout=1m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.replicas}'=2
  kubectl -n tenant-test wait --timeout=10m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
  # ingress / load balancer
  kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-monitoring-agents --for=condition=ready
  kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-ingress-nginx --for=condition=ready
  kubectl -n tenant-test get secret kubernetes-$name-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "admin.conf" | base64decode) }}' > admin.conf
  KUBECONFIG=admin.conf kubectl -n cozy-ingress-nginx wait --timeout=3m deploy ingress-nginx-defaultbackend --for=jsonpath='{.status.conditions[0].status}'=True
  KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=3m deploy cozy-monitoring-agents-metrics-server --for=jsonpath='{.status.conditions[0].status}'=True
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
}

@test "Create a PVC in tenant Kubernetes" {
  name='test'
  KUBECONFIG=admin.conf kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-$name
  namespace: cozy-monitoring
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
  sleep 10
  KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=20s pvc pvc-$name --for=jsonpath='{.status.phase}'=Bound
  KUBECONFIG=admin.conf kubectl -n cozy-monitoring delete pvc pvc-$name
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $name
}
@@ -2,18 +2,6 @@

@test "Create DB MySQL" {
  name='test'
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    resources=$(cat <<EOF
  resources:
    resources:
      cpu: 3000m
      memory: 3Gi
EOF
)
  else
    resources=' resources: {}'
  fi
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
@@ -43,15 +31,16 @@ spec:
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
$resources
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 10
  kubectl -n tenant-test wait --timeout=30s hr mysql-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s mysqls $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=110s sts mysql-$name --for=jsonpath='{.status.replicas}'=2
  sleep 60
  kubectl -n tenant-test wait --timeout=60s deploy mysql-$name-metrics --for=jsonpath='{.status.replicas}'=1
  kubectl -n tenant-test wait --timeout=100s svc mysql-$name --for=jsonpath='{.spec.ports[0].port}'=3306
  sleep 5
  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
  kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}
@@ -2,18 +2,6 @@

@test "Create DB PostgreSQL" {
  name='test'
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    resources=$(cat <<EOF
  resources:
    resources:
      cpu: 500m
      memory: 768Mi
EOF
)
  else
    resources=' resources: {}'
  fi
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
@@ -48,14 +36,19 @@ spec:
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
$resources
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait --timeout=200s hr postgres-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s postgreses $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=50s job.batch postgres-$name-init-job --for=condition=Complete
  kubectl -n tenant-test wait --timeout=40s svc postgres-$name-r --for=jsonpath='{.spec.ports[0].port}'=5432
  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  # for some reason it takes longer for the read-only endpoint to be ready
  #timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
  kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}
@@ -2,18 +2,6 @@

@test "Create Redis" {
  name='test'
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    resources=$(cat <<EOF
  resources:
    resources:
      cpu: 500m
      memory: 768Mi
EOF
)
  else
    resources='resources: {}'
  fi
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
@@ -26,15 +14,13 @@ spec:
  replicas: 2
  storageClass: ""
  authEnabled: true
$resources
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait --timeout=20s hr redis-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s redis.apps.cozystack.io $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=50s pvc redisfailover-persistent-data-rfr-redis-$name-0 --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait --timeout=90s sts rfr-redis-$name --for=jsonpath='{.status.replicas}'=2
  sleep 45
  kubectl -n tenant-test wait --timeout=45s deploy rfs-redis-$name --for=condition=available
  kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
  kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
  kubectl -n tenant-test delete redis.apps.cozystack.io $name
}
@@ -2,14 +2,6 @@

@test "Create a Virtual Machine" {
  name='test'
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    cores="1000m"
    memory="1Gi"
  else
    cores="2000m"
    memory="2Gi"
  fi
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
@@ -17,12 +9,6 @@ metadata:
  name: $name
  namespace: tenant-test
spec:
  domain:
    cpu:
      cores: "$cores"
    resources:
      requests:
        memory: "$memory"
  external: false
  externalMethod: PortList
  externalPorts:
@@ -34,6 +20,9 @@ spec:
  storage: 5Gi
  storageClass: replicated
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
@@ -48,12 +37,11 @@ spec:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 10
  kubectl -n tenant-test wait --timeout=10s hr virtual-machine-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s virtualmachines $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s pvc virtual-machine-$name --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait --timeout=150s dv virtual-machine-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=100s vm virtual-machine-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=150s vmi virtual-machine-$name --for=jsonpath='{status.phase}'=Running
  sleep 5
  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}
@@ -17,37 +17,21 @@ spec:
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait --timeout=5s hr vm-disk-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s vmdisks $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s pvc vm-disk-$name --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait --timeout=150s dv vm-disk-$name --for=condition=ready
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
  diskName='test'
  name='test'
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    cores="1000m"
    memory="1Gi"
  else
    cores="2000m"
    memory="2Gi"
  fi
  kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  domain:
    cpu:
      cores: "$cores"
    resources:
      requests:
        memory: "$memory"
  external: false
  externalMethod: PortList
  externalPorts:
@@ -58,6 +42,9 @@ spec:
  disks:
    - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
@@ -73,10 +60,9 @@ spec:
  cloudInitSeed: ""
EOF
  sleep 5
  kubectl -n tenant-test wait --timeout=5s hr vm-instance-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s vminstances $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=20s vm vm-instance-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=40s vmi vm-instance-$name --for=jsonpath='{status.phase}'=Running
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}
internal/controller/cozystack_controller.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package controller

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

type CozystackReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *CozystackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	l := log.FromContext(ctx)
	c := &cozyv1alpha1.Cozystack{}

	if err := r.Get(ctx, req.NamespacedName, c); err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		l.Error(err, "Unable to fetch Cozystack")
		return ctrl.Result{}, err
	}

	if !c.DeletionTimestamp.IsZero() {
		return ctrl.Result{}, nil
	}

	if c.Name != "cozystack" || c.Namespace != "cozy-system" {
		l.Info("only cozy-system/cozystack Cozystack is allowed in a cluster")
		err := r.Delete(ctx, c)
		if err != nil {
			l.Error(err, "Unable to delete invalid Cozystack")
		}
		return ctrl.Result{}, err
	}

	panic("not implemented")
}

func (r *CozystackReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&cozyv1alpha1.Cozystack{}).
		Complete(r)
}
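Since `Reconcile` enforces the singleton by deleting any `Cozystack` object other than `cozy-system/cozystack`, an alternative design (not used in this commit) is to filter events at watch time with a predicate, so stray objects never enter the work queue. A hedged sketch using controller-runtime's predicate API:

```go
package controller

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/predicate"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// setupWithManagerFiltered is a hypothetical alternative to SetupWithManager:
// it drops watch events so that only cozy-system/cozystack is ever enqueued.
func (r *CozystackReconciler) setupWithManagerFiltered(mgr ctrl.Manager) error {
	onlyCozySystem := predicate.NewPredicateFuncs(func(obj client.Object) bool {
		return obj.GetName() == "cozystack" && obj.GetNamespace() == "cozy-system"
	})
	return ctrl.NewControllerManagedBy(mgr).
		For(&cozyv1alpha1.Cozystack{}, builder.WithPredicates(onlyCozySystem)).
		Complete(r)
}
```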
@@ -75,6 +75,6 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
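For context on these repeated tables: the preset interacts with two values, and `resourcesPreset` applies only while `resources` is empty. A minimal YAML sketch of the two styles, with field names taken from the parameter docs in this diff (the exact value shapes are an assumption):

```yaml
# Option 1: rely on a named preset.
resources: {}
resourcesPreset: "large"

# Option 2: explicit values; the preset is then ignored.
resources:
  cpu: "2"
  memory: 2Gi
resourcesPreset: "nano"  # no effect once resources is set
```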
@@ -62,6 +62,6 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -100,7 +100,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -46,7 +46,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.25.2
version: 0.25.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -64,8 +64,6 @@ image-kubevirt-csi-driver:
		--load=$(LOAD)
	echo "$(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-csi-driver.json -o json -r)" \
		> images/kubevirt-csi-driver.tag
	IMAGE=$$(cat images/kubevirt-csi-driver.tag) \
	yq -i '.csiDriver.image = strenv(IMAGE)' ../../system/kubevirt-csi-node/values.yaml
	rm -f images/kubevirt-csi-driver.json
@@ -146,7 +146,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.1@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.0@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.1@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.0@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.0@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
@@ -13,17 +13,11 @@ rules:
    resources: ["datavolumes"]
    verbs: ["get", "create", "delete"]
  - apiGroups: ["kubevirt.io"]
    resources: ["virtualmachineinstances", "virtualmachines"]
    resources: ["virtualmachineinstances"]
    verbs: ["list", "get"]
  - apiGroups: ["subresources.kubevirt.io"]
    resources: ["virtualmachines/addvolume", "virtualmachines/removevolume"]
    resources: ["virtualmachineinstances/addvolume", "virtualmachineinstances/removevolume"]
    verbs: ["update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
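This rule change matters for volume hotplug: `addvolume`/`removevolume` are served by the `subresources.kubevirt.io` API group. A hedged example of the client call that exercises these verbs (object names are placeholders, not taken from this diff):

```bash
# Hotplug a volume into a running VMI; this request goes through the
# subresources.kubevirt.io addvolume subresource granted above.
virtctl addvolume my-vmi --volume-name=my-datavolume -n tenant-test
```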
@@ -40,7 +40,6 @@ spec:
            {{ .Release.Name }}-fluxcd-operator
            {{ .Release.Name }}-fluxcd
            {{ .Release.Name }}-gpu-operator
            {{ .Release.Name }}-velero
            -p '{"spec": {"suspend": true}}'
            --type=merge --field-manager=flux-client-side-apply || true
---
@@ -80,8 +79,6 @@ rules:
      - {{ .Release.Name }}-fluxcd-operator
      - {{ .Release.Name }}-fluxcd
      - {{ .Release.Name }}-gpu-operator
      - {{ .Release.Name }}-velero

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
@@ -119,7 +119,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -42,7 +42,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.17.1
version: 0.16.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -11,50 +11,7 @@ This managed service is controlled by the CloudNativePG operator, ensuring effic
- Docs: <https://cloudnative-pg.io/docs/>
- Github: <https://github.com/cloudnative-pg/cloudnative-pg>

## Operations

### How to enable backups

To back up a PostgreSQL application, an external S3-compatible storage is required.

To start regular backups, update the application, setting `backup.enabled` to `true`, and fill in the path and credentials in the `backup.*` parameters:

```yaml
## @param backup.enabled Enable regular backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.retentionPolicy Retention policy
## @param backup.destinationPath Path to store the backup (i.e. s3://bucket/path/to/folder)
## @param backup.endpointURL S3 Endpoint used to upload data to the cloud
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
backup:
  enabled: false
  retentionPolicy: 30d
  destinationPath: s3://bucket/path/to/folder/
  endpointURL: http://minio-gateway-service:9000
  schedule: "0 2 * * * *"
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
```

### How to recover a backup

CloudNativePG supports point-in-time recovery.
Recovering a backup is done by creating a new database instance and restoring the data in it.

Create a new PostgreSQL application with a different name but identical configuration.
Set `bootstrap.enabled` to `true` and fill in the name of the database instance to recover from and the recovery time:

```yaml
## @param bootstrap.enabled Restore database cluster from a backup
## @param bootstrap.recoveryTime Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest
## @param bootstrap.oldName Name of database cluster before deleting
##
bootstrap:
  enabled: false
  recoveryTime: "" # leave empty for latest or exact timestamp; example: 2020-11-26 15:22:00.00000+00
  oldName: "<previous-postgres-instance>"
```

## HowTos

### How to switch primary/secondary replica
@@ -62,6 +19,24 @@ See:

- <https://cloudnative-pg.io/documentation/1.15/rolling_update/#manual-updates-supervised>

### How to restore backup

Find the snapshot:

```bash
restic -r s3:s3.example.org/postgres-backups/database_name snapshots
```

Restore it:

```bash
restic -r s3:s3.example.org/postgres-backups/database_name restore latest --target /tmp/
```
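To restore a state other than `latest`, the snapshot ID reported by the `snapshots` command can be passed directly (the ID below is a placeholder):

```bash
restic -r s3:s3.example.org/postgres-backups/database_name restore 4bba301e --target /tmp/
```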

More details:

- <https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1>

## Parameters

### Common parameters
@@ -85,23 +60,23 @@ See:

### Backup parameters

| Name | Description | Value |
| ------------------------ | ---------------------------------------------------------- | ----------------------------------- |
| `backup.enabled` | Enable regular backups | `false` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * * *` |
| `backup.retentionPolicy` | Retention policy | `30d` |
| `backup.destinationPath` | Path to store the backup (i.e. s3://bucket/path/to/folder) | `s3://bucket/path/to/folder/` |
| `backup.endpointURL` | S3 Endpoint used to upload data to the cloud | `http://minio-gateway-service:9000` |
| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| Name | Description | Value |
| ------------------------ | -------------------------------------------------------------------- | ----------------------------------- |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * * *` |
| `backup.retentionPolicy` | The retention policy | `30d` |
| `backup.destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) | `s3://BUCKET_NAME/` |
| `backup.endpointURL` | Endpoint to be used to upload data to the cloud | `http://minio-gateway-service:9000` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |

### Bootstrap parameters

| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `bootstrap.enabled` | Restore database cluster from a backup | `false` |
| `bootstrap.recoveryTime` | Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest | `""` |
| `bootstrap.oldName` | Name of database cluster before deleting | `""` |
| `bootstrap.enabled` | Restore cluster from backup | `false` |
| `bootstrap.recoveryTime` | Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest | `""` |
| `bootstrap.oldName` | Name of cluster before deleting | `""` |
| `resources` | Explicit CPU and memory configuration for each PostgreSQL replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
@@ -128,7 +103,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -38,7 +38,7 @@ stringData:
      until pg_isready ; do sleep 5; done

      echo "== create users"
      {{- if and .Values.users (not (hasKey .Values.users "postgres")) }}
      {{- if .Values.users }}
      psql -v ON_ERROR_STOP=1 <<\EOT
      {{- range $user, $u := .Values.users }}
      SELECT 'CREATE ROLE "{{ $user }}" LOGIN INHERIT;'
@@ -47,8 +47,6 @@ stringData:
      COMMENT ON ROLE "{{ $user }}" IS 'user managed by helm';
      {{- end }}
      EOT
      {{- else if and .Values.users (hasKey .Values.users "postgres") }}
      {{- fail "`users.postgres` is forbidden by policy. Use a different username." }}
      {{- end }}

      echo "== delete users"
@@ -62,7 +62,7 @@
    "properties": {
      "enabled": {
        "type": "boolean",
        "description": "Enable regular backups",
        "description": "Enable periodic backups",
        "default": false
      },
      "schedule": {
@@ -72,27 +72,27 @@
      },
      "retentionPolicy": {
        "type": "string",
        "description": "Retention policy",
        "description": "The retention policy",
        "default": "30d"
      },
      "destinationPath": {
        "type": "string",
        "description": "Path to store the backup (i.e. s3://bucket/path/to/folder)",
        "default": "s3://bucket/path/to/folder/"
        "description": "The path where to store the backup (i.e. s3://bucket/path/to/folder)",
        "default": "s3://BUCKET_NAME/"
      },
      "endpointURL": {
        "type": "string",
        "description": "S3 Endpoint used to upload data to the cloud",
        "description": "Endpoint to be used to upload data to the cloud",
        "default": "http://minio-gateway-service:9000"
      },
      "s3AccessKey": {
        "type": "string",
        "description": "Access key for S3, used for authentication",
        "description": "The access key for S3, used for authentication",
        "default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
      },
      "s3SecretKey": {
        "type": "string",
        "description": "Secret key for S3, used for authentication",
        "description": "The secret key for S3, used for authentication",
        "default": "ju3eum4dekeich9ahM1te8waeGai0oog"
      }
    }
@@ -102,17 +102,17 @@
    "properties": {
      "enabled": {
        "type": "boolean",
        "description": "Restore database cluster from a backup",
        "description": "Restore cluster from backup",
        "default": false
      },
      "recoveryTime": {
        "type": "string",
        "description": "Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest",
        "description": "Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest",
        "default": ""
      },
      "oldName": {
        "type": "string",
        "description": "Name of database cluster before deleting",
        "description": "Name of cluster before deleting",
        "default": ""
      }
    }
@@ -59,17 +59,17 @@ databases: {}

## @section Backup parameters

## @param backup.enabled Enable regular backups
## @param backup.enabled Enable periodic backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.retentionPolicy Retention policy
## @param backup.destinationPath Path to store the backup (i.e. s3://bucket/path/to/folder)
## @param backup.endpointURL S3 Endpoint used to upload data to the cloud
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
## @param backup.retentionPolicy The retention policy
## @param backup.destinationPath The path where to store the backup (i.e. s3://bucket/path/to/folder)
## @param backup.endpointURL Endpoint to be used to upload data to the cloud
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
backup:
  enabled: false
  retentionPolicy: 30d
  destinationPath: s3://bucket/path/to/folder/
  destinationPath: s3://BUCKET_NAME/
  endpointURL: http://minio-gateway-service:9000
  schedule: "0 2 * * * *"
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
@@ -77,9 +77,9 @@ backup:

## @section Bootstrap parameters

## @param bootstrap.enabled Restore database cluster from a backup
## @param bootstrap.recoveryTime Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest
## @param bootstrap.oldName Name of database cluster before deleting
## @param bootstrap.enabled Restore cluster from backup
## @param bootstrap.recoveryTime Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest
## @param bootstrap.oldName Name of cluster before deleting
##
bootstrap:
  enabled: false
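These `## @param`/`## @section` annotations are the format consumed by Bitnami's readme-generator-for-helm, which keeps README parameter tables (like the ones diffed above) in sync with `values.yaml`. A hypothetical invocation, assuming that tool is what regenerates the tables in this repository:

```bash
readme-generator --values values.yaml --readme README.md
```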
@@ -45,6 +45,6 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -52,6 +52,6 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -54,9 +54,7 @@ kafka 0.7.0 6358fd7a
kafka 0.7.1 4369b031
kafka 0.8.0 HEAD
kubernetes 0.24.0 62cb694d
kubernetes 0.25.0 70f82667
kubernetes 0.25.1 acd4663a
kubernetes 0.25.2 HEAD
kubernetes 0.25.0 HEAD
mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e
@@ -103,9 +101,7 @@ postgres 0.12.0 6130f43d
postgres 0.12.1 632224a3
postgres 0.14.0 62cb694d
postgres 0.15.1 4369b031
postgres 0.16.0 70f82667
postgres 0.17.0 acd4663a
postgres 0.17.1 HEAD
postgres 0.16.0 HEAD
rabbitmq 0.1.0 263e47be
rabbitmq 0.2.0 53f2365e
rabbitmq 0.3.0 6c5cf5bf
@@ -157,8 +153,7 @@ virtual-machine 0.9.1 93bdf411
virtual-machine 0.10.0 6130f43d
virtual-machine 0.10.2 632224a3
virtual-machine 0.11.0 4369b031
virtual-machine 0.12.0 70f82667
virtual-machine 0.12.1 HEAD
virtual-machine 0.12.0 HEAD
vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 6130f43d
vm-disk 0.1.2 632224a3
@@ -175,8 +170,7 @@ vm-instance 0.6.0 721c12a7
vm-instance 0.7.0 6130f43d
vm-instance 0.7.2 632224a3
vm-instance 0.8.0 4369b031
vm-instance 0.9.0 70f82667
vm-instance 0.10.0 HEAD
vm-instance 0.9.0 HEAD
vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf
@@ -17,7 +17,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.12.1
version: 0.12.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -3,13 +3,6 @@ kind: Role
metadata:
  name: {{ .Release.Name }}-dashboard-resources
rules:
  - apiGroups:
      - ""
    resources:
      - services
    resourceNames:
      - {{ include "virtual-machine.fullname" . }}
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - cozystack.io
    resources:
@@ -9,7 +9,7 @@ stringData:
  key{{ $k }}: {{ quote $v }}
{{- end }}
{{- end }}
{{- if or .Values.cloudInit .Values.sshKeys }}
{{- if .Values.cloudInit }}
---
apiVersion: v1
kind: Secret
@@ -17,17 +17,5 @@ metadata:
  name: {{ include "virtual-machine.fullname" . }}-cloud-init
stringData:
  userdata: |
    {{- if .Values.cloudInit }}
    {{- .Values.cloudInit | nindent 4 }}
    {{- else if and (.Values.sshKeys) (not .Values.cloudInit) }}
    {{- /*
    We usually provide ssh keys in cloud-init metadata, because userdata is not typed and can be used for any purpose.
    However, if the user provides ssh keys but not cloud-init, we still need to provide a minimal cloud-init config to avoid errors.
    */}}
    #cloud-config
    ssh_authorized_keys:
    {{- range .Values.sshKeys }}
      - {{ quote . }}
    {{- end }}
    {{- end }}
    {{- .Values.cloudInit | nindent 4 }}
{{- end }}
@@ -92,7 +92,7 @@ spec:
          - disk:
              bus: scsi
            name: systemdisk
          {{- if or .Values.cloudInit .Values.sshKeys }}
          {{- if .Values.sshKeys }}
          - disk:
              bus: virtio
            name: cloudinitdisk
@@ -122,11 +122,28 @@ spec:
      - name: systemdisk
        dataVolume:
          name: {{ include "virtual-machine.fullname" . }}
      {{- if or .Values.cloudInit .Values.sshKeys }}

      {{- if and .Values.sshKeys .Values.cloudInit }}
      - name: cloudinitdisk
        cloudInitNoCloud:
          secretRef:
            name: {{ include "virtual-machine.fullname" . }}-cloud-init
      {{- else if .Values.sshKeys }}
      - name: cloudinitdisk
        cloudInitNoCloud:
          userData: |
            {{ printf "%s" "#cloud-config" }}
            ssh_authorized_keys:
            {{- range .Values.sshKeys }}
              - {{ . }}
            {{- end }}
            chpasswd:
              expire: false
      {{- else }}
      - name: cloudinitdisk
        cloudInitNoCloud:
          userData: |
            {{ printf "%s" "#cloud-config" }}
      {{- end }}

      networks:
@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.10.0
version: 0.9.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 0.10.0
appVersion: 0.8.0
@@ -3,13 +3,6 @@ kind: Role
metadata:
  name: {{ .Release.Name }}-dashboard-resources
rules:
  - apiGroups:
      - ""
    resources:
      - services
    resourceNames:
      - {{ include "virtual-machine.fullname" . }}
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - cozystack.io
    resources:
@@ -9,7 +9,7 @@ stringData:
  key{{ $k }}: {{ quote $v }}
{{- end }}
{{- end }}
{{- if or .Values.cloudInit .Values.sshKeys }}
{{- if .Values.cloudInit }}
---
apiVersion: v1
kind: Secret
@@ -17,17 +17,5 @@ metadata:
  name: {{ include "virtual-machine.fullname" . }}-cloud-init
stringData:
  userdata: |
    {{- if .Values.cloudInit }}
    {{- .Values.cloudInit | nindent 4 }}
    {{- else if and (.Values.sshKeys) (not .Values.cloudInit) }}
    {{- /*
    We usually provide ssh keys in cloud-init metadata, because userdata is not typed and can be used for any purpose.
    However, if the user provides ssh keys but not cloud-init, we still need to provide a minimal cloud-init config to avoid errors.
    */}}
    #cloud-config
    ssh_authorized_keys:
    {{- range .Values.sshKeys }}
      - {{ quote . }}
    {{- end }}
    {{- end }}
    {{- .Values.cloudInit | nindent 4 }}
{{- end }}
@@ -54,24 +54,24 @@ spec:
      disks:
        {{- range $i, $disk := .Values.disks }}
        - name: disk-{{ $disk.name }}
          {{- $dv := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
          {{- if $dv }}
          {{- if and (hasKey $dv.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $dv.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
          cdrom:
          {{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
          {{- if $disk }}
          {{- if and (hasKey $disk.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $disk.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
          cdrom: {}
          {{- else }}
          disk:
          disk: {}
          {{- end }}
          {{- if eq $i 0 }}
          bootOrder: 1
          {{- end }}
          {{- with $disk.bus }}
          bus: {{ . }}
          {{- end }}
          bootOrder: {{ add $i 1 }}
          {{- else }}
          {{- fail (printf "Specified disk not exists in cluster: %s" .name) }}
          {{- end }}
        {{- end }}
        {{- if or .Values.cloudInit .Values.sshKeys }}
        {{- if or .Values.sshKeys .Values.cloudInit }}
        - name: cloudinitdisk
          disk: {}
          disk:
            bus: virtio
        {{- end }}
      interfaces:
        - name: default
@@ -95,11 +95,27 @@ spec:
        dataVolume:
          name: vm-disk-{{ .name }}
      {{- end }}
      {{- if or .Values.cloudInit .Values.sshKeys }}
      {{- if and .Values.sshKeys .Values.cloudInit }}
      - name: cloudinitdisk
        cloudInitNoCloud:
          secretRef:
            name: {{ include "virtual-machine.fullname" . }}-cloud-init
      {{- else if .Values.sshKeys }}
      - name: cloudinitdisk
        cloudInitNoCloud:
          userData: |
            {{ printf "%s" "#cloud-config" }}
            ssh_authorized_keys:
            {{- range .Values.sshKeys }}
              - {{ . }}
            {{- end }}
            chpasswd:
              expire: false
      {{- else }}
      - name: cloudinitdisk
        cloudInitNoCloud:
          userData: |
            {{ printf "%s" "#cloud-config" }}
      {{- end }}
      networks:
        - name: default
@@ -22,7 +22,6 @@ instanceProfile: ubuntu
## disks:
## - name: example-system
## - name: example-data
##   bus: sata
disks: []

## @param gpus [array] List of GPUs to attach
@@ -56,7 +56,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -1,2 +1,2 @@
cozystack:
  image: ghcr.io/cozystack/cozystack/installer:v0.33.1@sha256:03a0002be9cf5926643c295bbf05c3e250401b0f0595b9fcd147d53534f368f5
  image: ghcr.io/cozystack/cozystack/installer:v0.33.0@sha256:6cdc5d9062b536929152214e8a6a6b8096b64a17592e04a3633f58d21ff43a63

@@ -1,2 +1,2 @@
e2e:
  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.33.1@sha256:eed183a4104b1c142f6c4a358338749efe73baefddd53d7fe4c7149ecb892ce1
  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.33.0@sha256:fd169ae7ee7b0b10ee34f02353ae96c182ca7b6cede771c8fc6539894416104f

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.33.1@sha256:ca3638c620215ace26ace3f7e8b27391847ab2158b5a67f070f43dcbea071532
ghcr.io/cozystack/cozystack/matchbox:v0.33.0@sha256:adc133234a48f3496441334348aeab400ee29b8514129c110b892fa1e0dff1d8

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:b748d9add5fc4080b143d8690ca1ad851d911948ac8eb296dd9005d53d153c05
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:2759763d35ba35144ba10ba4d2b9effd875f4f0d01d9694b010f491ba6eb6d46

@@ -1,2 +1,2 @@
cozystackAPI:
  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.33.1@sha256:ee6b71d3ab1c1484490ff1dc57a7df82813c4f18d6393f149d32acf656aa779d
  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.33.0@sha256:d9bee0e9f73a950784e43d907552c21044d01eed728e1185455308e49d00c00d
@@ -1,5 +1,5 @@
cozystackController:
  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.33.1@sha256:4777488e14f0313b153b153388c78ab89e3a39582c30266f2321704df1976922
  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.33.0@sha256:a1fceb277007846bc85ceee0afd1f5d1122496174203c718c1275a1038cb07f6
  debug: false
  disableTelemetry: false
  cozystackVersion: "v0.33.1"
  cozystackVersion: "v0.33.0"
@@ -76,7 +76,7 @@ data:
      "kubeappsNamespace": {{ .Release.Namespace | quote }},
      "helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
      "carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
      "appVersion": "v0.33.1",
      "appVersion": "v0.33.0",
      "authProxyEnabled": {{ .Values.authProxy.enabled }},
      "oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
      "oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},
@@ -1,7 +1,7 @@
FROM bitnami/node:20.15.1 AS build
WORKDIR /app

ARG COMMIT_REF=e1382f51c6db1bca0a8ecd454407c8e282fe0243
ARG COMMIT_REF=6856b66f9244ef1b2703a2f30899366e0ba040de
RUN wget -O- https://github.com/cozystack/kubeapps/archive/${COMMIT_REF}.tar.gz | tar xzf - --strip-components=2 kubeapps-${COMMIT_REF}/dashboard

RUN yarn install --frozen-lockfile
@@ -4,7 +4,7 @@
# syntax = docker/dockerfile:1

FROM alpine AS source
ARG COMMIT_REF=e1382f51c6db1bca0a8ecd454407c8e282fe0243
ARG COMMIT_REF=6856b66f9244ef1b2703a2f30899366e0ba040de
RUN apk add --no-cache patch
WORKDIR /source
RUN wget -O- https://github.com/cozystack/kubeapps/archive/${COMMIT_REF}.tar.gz | tar xzf - --strip-components=1
@@ -19,7 +19,7 @@ kubeapps:
  image:
    registry: ghcr.io/cozystack/cozystack
    repository: dashboard
    tag: v0.33.1
    tag: v0.33.0
    digest: "sha256:5e514516bd3dc0c693bb346ddeb9740e0439a59deb2a56b87317286e3ce79ac9"
  redis:
    master:
@@ -37,8 +37,8 @@ kubeapps:
  image:
    registry: ghcr.io/cozystack/cozystack
    repository: kubeapps-apis
    tag: v0.33.1
    digest: "sha256:ea5b21a27c97b14880042d2a642670e3461e7d946c65b5b557d2eb8df9f03a87"
    tag: v0.33.0
    digest: "sha256:8c60134b9216e0cd8ffc044c14c872b76c1a95879b4cf7887541980ade9e8c65"
  pluginConfig:
    flux:
      packages:
@@ -3,7 +3,7 @@ kamaji:
  deploy: false
  image:
    pullPolicy: IfNotPresent
    tag: v0.33.1@sha256:09fc5c9aeb97880780abfc6d82c216725d6f79e13494bf2399766c882b88f66b
    tag: v0.33.0@sha256:afaf5f003eb990377c21623d17bb00e7a95a1021e1c36b318cb451b80c8d37a2
    repository: ghcr.io/cozystack/cozystack/kamaji
  resources:
    limits:
@@ -1,3 +1,3 @@
portSecurity: true
routes: ""
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.33.1@sha256:595851560856e3ba7f408f259acf84599494984a9f0252de289bcb1a7fc5b9da
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.33.0@sha256:926fa45edd2149e4bc4bb54710832c8fb7aa46c85cf6adb7cd486e0b956cdbfa

@@ -64,4 +64,4 @@ global:
images:
  kubeovn:
    repository: kubeovn
    tag: v1.13.13@sha256:c0ffc9a0498b6f8fc392f8fc6ea43d0c7eedeeabda8ef96bca004ec4466a6bf2
    tag: v1.13.13@sha256:6315d11876b78f3c24e54a73063d05c63137c4210dcd7620bd983db5fedf469a
@@ -163,7 +163,7 @@ spec:
          privileged: true
          allowPrivilegeEscalation: true
        imagePullPolicy: Always
        image: {{ .Values.csiDriver.image }}
        image: ghcr.io/kvaps/test:kubevirt-csi-driver
        args:
          - "--endpoint=unix:/csi/csi.sock"
          - "--node-name=$(KUBE_NODE_NAME)"
@@ -1,3 +1 @@
storageClass: replicated
csiDriver:
  image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
@@ -236,15 +236,6 @@ func (o *AppsServerOptions) Config() (*apiserver.Config, error) {
		},
	}

	// make `.spec` schemaless so any keys are accepted
	if specProp, ok := newDef.Properties["spec"]; ok {
		specProp.AdditionalProperties = &spec.SchemaOrBool{
			Allows: true,
			Schema: &spec.Schema{},
		}
		newDef.Properties["spec"] = specProp
	}

	// 3. Save the new resource definition under the correct name
	defs[resourceName] = *newDef
	klog.V(6).Infof("PostProcessSpec: Added OpenAPI definition for %s\n", resourceName)
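For reference, the block dropped by this hunk (15 lines shrinking to 6 suggests a deletion) made the generated `spec` schemaless: setting `AdditionalProperties` to an empty, always-allowing schema serializes to roughly the OpenAPI fragment sketched below, which accepts arbitrary keys under `spec`. Removing it restores strict property checking. This rendering is an approximation, not output captured from the API server:

```json
{
  "spec": {
    "type": "object",
    "additionalProperties": {}
  }
}
```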
@@ -76,7 +76,6 @@ type REST struct {
	gvr           schema.GroupVersionResource
	gvk           schema.GroupVersionKind
	kindName      string
	singularName  string
	releaseConfig config.ReleaseConfig
}

@@ -94,7 +93,6 @@ func NewREST(dynamicClient dynamic.Interface, config *config.Resource) *REST {
			Version: "v1alpha1",
		}.WithKind(config.Application.Kind),
		kindName:      config.Application.Kind,
		singularName:  config.Application.Singular,
		releaseConfig: config.Release,
	}
}

@@ -106,7 +104,7 @@ func (r *REST) NamespaceScoped() bool {

// GetSingularName returns the singular name of the resource
func (r *REST) GetSingularName() string {
	return r.singularName
	return r.gvr.Resource
}

// Create handles the creation of a new Application by converting it to a HelmRelease

@@ -425,15 +423,6 @@ func (r *REST) Update(ctx context.Context, name string, objInfo rest.UpdatedObje
		return nil, false, fmt.Errorf("conversion error: %v", err)
	}

	// Ensure ResourceVersion
	if helmRelease.ResourceVersion == "" {
		cur, err := r.dynamicClient.Resource(helmReleaseGVR).Namespace(helmRelease.Namespace).Get(ctx, helmRelease.Name, metav1.GetOptions{})
		if err != nil {
			return nil, false, fmt.Errorf("failed to fetch current HelmRelease: %w", err)
		}
		helmRelease.SetResourceVersion(cur.GetResourceVersion())
	}

	// Merge system labels (from config) directly
	helmRelease.Labels = mergeMaps(r.releaseConfig.Labels, helmRelease.Labels)
	// Merge user labels with prefix