Compare commits


1 Commit

Author SHA1 Message Date
Ahmad Murzahmatov
47dd7d19f8 [tests] Run tests w/ requests and limits
Signed-off-by: Ahmad Murzahmatov <gwynbleidd2106@yandex.com>
2025-07-09 20:08:39 +06:00
30 changed files with 317 additions and 354 deletions

View File

@@ -118,7 +118,6 @@ jobs:
git config user.name "cozystack-bot"
git config user.email "217169706+cozystack-bot@users.noreply.github.com"
git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
git config --unset-all http.https://github.com/.extraheader || true
git add .
git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
git push origin HEAD || true
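
For context, `${GITHUB_REF#refs/tags/}` is plain POSIX prefix-stripping parameter expansion: it removes the leading `refs/tags/` so only the tag name lands in the commit message. A minimal sketch, with a hypothetical tag value:

GITHUB_REF='refs/tags/v1.2.3'
echo "Prepare release ${GITHUB_REF#refs/tags/}"
# prints: Prepare release v1.2.3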

View File

@@ -2,6 +2,18 @@
@test "Create DB ClickHouse" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources=' resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
@@ -27,15 +39,13 @@ spec:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
$resources
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test wait --timeout=40s hr clickhouse-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s clickhouses $name --for=condition=ready
kubectl -n tenant-test wait --timeout=120s sts chi-clickhouse-$name-clickhouse-0-0 --for=jsonpath='{.status.replicas}'=1
timeout 210 sh -ec "until kubectl -n tenant-test wait svc chendpoint-clickhouse-$name --for=jsonpath='{.spec.ports[0].port}'=8123; do sleep 10; done"
kubectl -n tenant-test delete clickhouse.apps.cozystack.io $name
}
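
The tests in this change all use the same trick: a pre-indented YAML fragment is built in a shell variable and spliced into the manifest heredoc, so requests and limits are only set when `withResources` is true. A standalone sketch of the mechanism, simplified here to a single `resources:` key and assuming the two-space base indentation shown in the diff:

#!/usr/bin/env bash
withResources='true'
if [ "$withResources" == 'true' ]; then
  resources=$(cat <<EOF
  resources:
    cpu: 500m
    memory: 768Mi
EOF
)
else
  resources='  resources: {}'
fi
cat <<EOF
spec:
$resources
  resourcesPreset: "nano"
EOF

Because a heredoc body keeps its leading spaces, the expanded `$resources` lines align under `spec:` in either branch.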

View File

@@ -2,6 +2,18 @@
@test "Create Kafka" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources='resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kafka
@@ -14,13 +26,13 @@ spec:
size: 10Gi
replicas: 2
storageClass: ""
resources: {}
$resources
resourcesPreset: "nano"
zookeeper:
size: 5Gi
replicas: 2
storageClass: ""
resources:
$resources
resourcesPreset: "nano"
topics:
- name: testResults
@@ -38,14 +50,9 @@ spec:
replicas: 2
EOF
sleep 5
kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait --timeout=30s hr kafka-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=1m kafkas $name --for=condition=ready
kubectl -n tenant-test wait --timeout=50s pvc data-kafka-$name-zookeeper-0 --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait --timeout=40s svc kafka-$name-zookeeper-client --for=jsonpath='{.spec.ports[0].port}'=2181
kubectl -n tenant-test delete kafka.apps.cozystack.io $name
kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
}

View File

@@ -1,16 +1,17 @@
#!/usr/bin/env bats
@test "Create a tenant Kubernetes control plane" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test
  name: $name
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: false
      enabled: true
      valuesOverride: {}
    cilium:
      valuesOverride: {}
@@ -24,10 +25,12 @@ spec:
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts: []
      hosts:
      - example.org
      exposeMethod: Proxied
      valuesOverride: {}
    monitoringAgents:
      enabled: false
      enabled: true
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
@@ -61,12 +64,39 @@ spec:
  - ingress-nginx
  storageClass: replicated
EOF
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
  sleep 10
  kubectl wait --timeout=20s namespace tenant-test --for=jsonpath='{.status.phase}'=Active
  kubectl -n tenant-test wait --timeout=10s kamajicontrolplane kubernetes-$name --for=jsonpath='{.status.conditions[0].status}'=True
  kubectl -n tenant-test wait --timeout=4m kamajicontrolplane kubernetes-$name --for=condition=TenantControlPlaneCreated
  kubectl -n tenant-test wait --timeout=210s tcp kubernetes-$name --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl -n tenant-test wait --timeout=4m deploy kubernetes-$name kubernetes-$name-cluster-autoscaler kubernetes-$name-kccm kubernetes-$name-kcsi-controller --for=condition=available
  kubectl -n tenant-test wait --timeout=1m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.replicas}'=2
  kubectl -n tenant-test wait --timeout=10m machinedeployment kubernetes-$name-md0 --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
  # ingress / load balancer
  kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-monitoring-agents --for=condition=ready
  kubectl -n tenant-test wait --timeout=5m hr kubernetes-$name-ingress-nginx --for=condition=ready
  kubectl -n tenant-test get secret kubernetes-$name-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "admin.conf" | base64decode) }}' > admin.conf
  KUBECONFIG=admin.conf kubectl -n cozy-ingress-nginx wait --timeout=3m deploy ingress-nginx-defaultbackend --for=jsonpath='{.status.conditions[0].status}'=True
  KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=3m deploy cozy-monitoring-agents-metrics-server --for=jsonpath='{.status.conditions[0].status}'=True
}
@test "Create a PVC in tenant Kubernetes" {
  name='test'
  KUBECONFIG=admin.conf kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-$name
  namespace: cozy-monitoring
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
  sleep 10
  KUBECONFIG=admin.conf kubectl -n cozy-monitoring wait --timeout=20s pvc pvc-$name --for=jsonpath='{.status.phase}'=Bound
  KUBECONFIG=admin.conf kubectl -n cozy-monitoring delete pvc pvc-$name
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $name
}
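
Most of the rewritten checks in these tests follow one shape: an ad-hoc polling loop is replaced by a declarative `kubectl wait`. A rough equivalence, with the service name and port as placeholders:

# before: poll a jsonpath query until it prints the expected port
timeout 60 sh -ec "until kubectl -n tenant-test get svc example-svc -o jsonpath='{.spec.ports[0].port}' | grep -q '8080'; do sleep 10; done"
# after: let kubectl block until the jsonpath matches
kubectl -n tenant-test wait --timeout=60s svc example-svc --for=jsonpath='{.spec.ports[0].port}'=8080

The `kubectl wait` form fails with a clear timeout error and needs no subshell.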

View File

@@ -2,6 +2,18 @@
@test "Create DB MySQL" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 3000m
memory: 3Gi
EOF
)
else
resources=' resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
@@ -31,16 +43,15 @@ spec:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
$resources
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
sleep 10
kubectl -n tenant-test wait --timeout=30s hr mysql-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s mysqls $name --for=condition=ready
kubectl -n tenant-test wait --timeout=110s sts mysql-$name --for=jsonpath='{.status.replicas}'=2
sleep 60
kubectl -n tenant-test wait --timeout=60s deploy mysql-$name-metrics --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test wait --timeout=100s svc mysql-$name --for=jsonpath='{.spec.ports[0].port}'=3306
kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}

View File

@@ -2,6 +2,18 @@
@test "Create DB PostgreSQL" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources=' resources: {}'
fi
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
@@ -36,19 +48,14 @@ spec:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
$resources
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
# for some reason it takes longer for the read-only endpoint to be ready
#timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait --timeout=200s hr postgres-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s postgreses $name --for=condition=ready
kubectl -n tenant-test wait --timeout=50s job.batch postgres-$name-init-job --for=condition=Complete
kubectl -n tenant-test wait --timeout=40s svc postgres-$name-r --for=jsonpath='{.spec.ports[0].port}'=5432
kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}

View File

@@ -2,6 +2,18 @@
@test "Create Redis" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
resources=$(cat <<EOF
resources:
resources:
cpu: 500m
memory: 768Mi
EOF
)
else
resources='resources: {}'
fi
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
@@ -14,13 +26,15 @@ spec:
replicas: 2
storageClass: ""
authEnabled: true
resources: {}
$resources
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
kubectl -n tenant-test wait --timeout=20s hr redis-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s redis.apps.cozystack.io $name --for=condition=ready
kubectl -n tenant-test wait --timeout=50s pvc redisfailover-persistent-data-rfr-redis-$name-0 --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait --timeout=90s sts rfr-redis-$name --for=jsonpath='{.status.replicas}'=2
sleep 45
kubectl -n tenant-test wait --timeout=45s deploy rfs-redis-$name --for=condition=available
kubectl -n tenant-test delete redis.apps.cozystack.io $name
}

View File

@@ -2,6 +2,14 @@
@test "Create a Virtual Machine" {
name='test'
withResources='true'
if [ "$withResources" == 'true' ]; then
cores="1000m"
memory="1Gi
else
cores="2000m"
memory="2Gi
fi
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
@@ -9,6 +17,12 @@ metadata:
name: $name
namespace: tenant-test
spec:
domain:
cpu:
cores: "$cores"
resources:
requests:
memory: "$memory"
external: false
externalMethod: PortList
externalPorts:
@@ -20,9 +34,6 @@ spec:
storage: 5Gi
storageClass: replicated
gpus: []
resources:
cpu: ""
memory: ""
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
test@test
@@ -37,11 +48,12 @@ spec:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInitSeed: ""
EOF
sleep 5
kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
sleep 10
kubectl -n tenant-test wait --timeout=10s hr virtual-machine-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s virtualmachines $name --for=condition=ready
kubectl -n tenant-test wait --timeout=130s pvc virtual-machine-$name --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait --timeout=150s dv virtual-machine-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=100s vm virtual-machine-$name --for=condition=ready
kubectl -n tenant-test wait --timeout=150s vmi virtual-machine-$name --for=jsonpath='{status.phase}'=Running
kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}

View File

@@ -17,21 +17,37 @@ spec:
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait --timeout=5s hr vm-disk-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s vmdisks $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s pvc vm-disk-$name --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait --timeout=150s dv vm-disk-$name --for=condition=ready
}
@test "Create a VM Instance" {
  diskName='test'
  name='test'
  kubectl apply -f - <<EOF
  withResources='true'
  if [ "$withResources" == 'true' ]; then
    cores="1000m"
    memory="1Gi"
  else
    cores="2000m"
    memory="2Gi"
  fi
  kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  domain:
    cpu:
      cores: "$cores"
    resources:
      requests:
        memory: "$memory"
  external: false
  externalMethod: PortList
  externalPorts:
@@ -42,9 +58,6 @@ spec:
  disks:
  - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
@@ -60,9 +73,10 @@ spec:
  cloudInitSeed: ""
EOF
  sleep 5
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test wait --timeout=5s hr vm-instance-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=130s vminstances $name --for=condition=ready
  kubectl -n tenant-test wait --timeout=20s vm vm-instance-$name --for=condition=ready
  kubectl -n tenant-test wait --timeout=40s vmi vm-instance-$name --for=jsonpath='{status.phase}'=Running
  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}
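
The `get ... || kubectl create` pair above is a small idempotency guard: the manifest is only created when the object does not already exist, so re-running the test cannot fail with an AlreadyExists error. The pattern in isolation (the resource name and manifest file are illustrative):

kubectl -n tenant-test get vminstances.apps.cozystack.io test ||
kubectl create -f vm-instance.yaml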

View File

@@ -3,7 +3,6 @@ package controller
import (
	"context"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -16,10 +15,6 @@ import (
	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

const (
	deletionRequeueDelay = 30 * time.Second
)

// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadReconciler struct {
	client.Client
@@ -57,9 +52,6 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
	// found object, nothing to do
	if err == nil {
		if !t.GetDeletionTimestamp().IsZero() {
			return ctrl.Result{RequeueAfter: deletionRequeueDelay}, nil
		}
		return ctrl.Result{}, nil
	}

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.6.0@sha256:50ac1581e3100bd6c477a71161cb455a341ffaf9e5e2f6086802e4e25271e8af
ghcr.io/cozystack/cozystack/nginx-cache:0.6.0@sha256:b7633717cd7449c0042ae92d8ca9b36e4d69566561f5c7d44e21058e7d05c6d5

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.2@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.1@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.2@sha256:71f9afa218693a890f827cb5cda98ba327302bd9f58afde767740557538e07d9
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.1@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.2@sha256:761e7235ff9cb7f6f223f00954943e6a5af32ed6624ee592a8610122f96febb0
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/mariadb-backup:0.9.0@sha256:a3789db9e9e065ff60cbac70771b4a8aa1460db3194307cf5ca5d4fe1b412b6b
ghcr.io/cozystack/cozystack/mariadb-backup:0.9.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4

View File

@@ -1,2 +1,2 @@
cozystack:
  image: ghcr.io/cozystack/cozystack/installer:v0.33.3@sha256:cee50a80af792af7427c5e1e32d6841f62b9eed1a857b41729372ba79bd18e68
  image: ghcr.io/cozystack/cozystack/installer:v0.33.1@sha256:03a0002be9cf5926643c295bbf05c3e250401b0f0595b9fcd147d53534f368f5

View File

@@ -1,2 +1,2 @@
e2e:
  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.33.3@sha256:9baa3c1465133b968e775f29f74c8b569fea29c7e5b490551f0a5baf8b6f1270
  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.33.1@sha256:eed183a4104b1c142f6c4a358338749efe73baefddd53d7fe4c7149ecb892ce1

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.33.3@sha256:2e71760570c40d18b29f74f989f120563acefc77c25c477a1d6722f50794a588
ghcr.io/cozystack/cozystack/matchbox:v0.33.1@sha256:ca3638c620215ace26ace3f7e8b27391847ab2158b5a67f070f43dcbea071532

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:208d8ea43b4b493ee0bea80606f6b3041a02460be79c52ed12aecccd35ec2a02
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:b748d9add5fc4080b143d8690ca1ad851d911948ac8eb296dd9005d53d153c05

View File

@@ -1,2 +1,2 @@
cozystackAPI:
  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.33.3@sha256:c9d123823e2cfd8900d485401f8ac7b805be248a1a40715e746495739165787b
  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.33.1@sha256:ee6b71d3ab1c1484490ff1dc57a7df82813c4f18d6393f149d32acf656aa779d

View File

@@ -1,5 +1,5 @@
cozystackController:
  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.33.3@sha256:3b589fc8a102344d66407d814bdcc807a1a97a42163336bc3b5d0c44b086c128
  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.33.1@sha256:4777488e14f0313b153b153388c78ab89e3a39582c30266f2321704df1976922
  debug: false
  disableTelemetry: false
cozystackVersion: "v0.33.3"
cozystackVersion: "v0.33.1"

View File

@@ -76,7 +76,7 @@ data:
"kubeappsNamespace": {{ .Release.Namespace | quote }},
"helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
"carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
"appVersion": "v0.33.3",
"appVersion": "v0.33.1",
"authProxyEnabled": {{ .Values.authProxy.enabled }},
"oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
"oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},

View File

@@ -19,8 +19,8 @@ kubeapps:
  image:
    registry: ghcr.io/cozystack/cozystack
    repository: dashboard
    tag: v0.33.3
    digest: "sha256:ac2b5348d85fe37ad70a4cc159881c4eaded9175a4b586cfa09a52b0fbe5e1e5"
    tag: v0.33.1
    digest: "sha256:5e514516bd3dc0c693bb346ddeb9740e0439a59deb2a56b87317286e3ce79ac9"
  redis:
    master:
      resourcesPreset: "none"
@@ -37,8 +37,8 @@ kubeapps:
  image:
    registry: ghcr.io/cozystack/cozystack
    repository: kubeapps-apis
    tag: v0.33.3
    digest: "sha256:be25afff8d0f8ae8d2e42a824856e9776db0cee285f6f41514803a4e85e17371"
    tag: v0.33.1
    digest: "sha256:ea5b21a27c97b14880042d2a642670e3461e7d946c65b5b557d2eb8df9f03a87"
  pluginConfig:
    flux:
      packages:

View File

@@ -3,7 +3,7 @@ kamaji:
  deploy: false
  image:
    pullPolicy: IfNotPresent
    tag: v0.33.3@sha256:09465ae8285b4ae43203581e443409cd4e1e119dde62a5c14d63ce064fb840b0
    tag: v0.33.1@sha256:09fc5c9aeb97880780abfc6d82c216725d6f79e13494bf2399766c882b88f66b
    repository: ghcr.io/cozystack/cozystack/kamaji
  resources:
    limits:

View File

@@ -1,3 +1,3 @@
portSecurity: true
routes: ""
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.33.3@sha256:c7f42022280a565da8b3091ed2f4fe2768fcd392327d23172a532c24794787c6
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.33.1@sha256:595851560856e3ba7f408f259acf84599494984a9f0252de289bcb1a7fc5b9da

View File

@@ -64,4 +64,4 @@ global:
  images:
    kubeovn:
      repository: kubeovn
      tag: v1.13.13@sha256:f8edb9f98fe64daf5e3634b8844cea7747e197f3a345d6fe8be6f1d336154cfb
      tag: v1.13.13@sha256:c0ffc9a0498b6f8fc392f8fc6ea43d0c7eedeeabda8ef96bca004ec4466a6bf2

View File

@@ -1,3 +1,3 @@
storageClass: replicated
csiDriver:
  image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.2@sha256:761e7235ff9cb7f6f223f00954943e6a5af32ed6624ee592a8610122f96febb0
  image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036

View File

@@ -1,221 +0,0 @@
package server

import (
	"encoding/json"
	"fmt"
	"strings"

	"k8s.io/kube-openapi/pkg/spec3"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

// -----------------------------------------------------------------------------
// shared helpers
// -----------------------------------------------------------------------------

const (
	baseRef     = "com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.Application"
	baseListRef = baseRef + "List"
	smp         = "application/strategic-merge-patch+json"
)

func deepCopySchema(in *spec.Schema) *spec.Schema {
	if in == nil {
		return nil
	}
	b, err := json.Marshal(in)
	if err != nil {
		// Log error or panic since this is unexpected
		panic(fmt.Sprintf("failed to marshal schema: %v", err))
	}
	var out spec.Schema
	if err := json.Unmarshal(b, &out); err != nil {
		panic(fmt.Sprintf("failed to unmarshal schema: %v", err))
	}
	return &out
}

// find the object that already owns ".spec"
func findSpecContainer(s *spec.Schema) *spec.Schema {
	if s == nil {
		return nil
	}
	if len(s.Type) > 0 && s.Type.Contains("object") && s.Properties != nil {
		if _, ok := s.Properties["spec"]; ok {
			return s
		}
	}
	for _, branch := range [][]spec.Schema{s.AllOf, s.OneOf, s.AnyOf} {
		for i := range branch {
			if res := findSpecContainer(&branch[i]); res != nil {
				return res
			}
		}
	}
	return nil
}

// apply user-supplied schema; when raw == "" turn the field into a schemaless object
func patchSpec(target *spec.Schema, raw string) error {
	// ------------------------------------------------------------------
	// 1) schema not provided → make ".spec" a fully open object
	// ------------------------------------------------------------------
	if strings.TrimSpace(raw) == "" {
		if target.Properties == nil {
			target.Properties = map[string]spec.Schema{}
		}
		prop := target.Properties["spec"]
		prop.AdditionalProperties = &spec.SchemaOrBool{
			Allows: true,
			Schema: &spec.Schema{},
		}
		target.Properties["spec"] = prop
		return nil
	}

	// ------------------------------------------------------------------
	// 2) custom schema provided → keep / inject additionalProperties
	// ------------------------------------------------------------------
	var custom spec.Schema
	if err := json.Unmarshal([]byte(raw), &custom); err != nil {
		return err
	}
	// if user didn't specify additionalProperties, add a permissive one
	if custom.AdditionalProperties == nil {
		custom.AdditionalProperties = &spec.SchemaOrBool{
			Allows: true,
			Schema: &spec.Schema{},
		}
	}
	if target.Properties == nil {
		target.Properties = map[string]spec.Schema{}
	}
	target.Properties["spec"] = custom
	return nil
}

// -----------------------------------------------------------------------------
// OpenAPI **v3** post-processor
// -----------------------------------------------------------------------------

func buildPostProcessV3(kindSchemas map[string]string) func(*spec3.OpenAPI) (*spec3.OpenAPI, error) {
	return func(doc *spec3.OpenAPI) (*spec3.OpenAPI, error) {
		// Replace the basic "Application" schema with the user-supplied kinds.
		if doc.Components == nil {
			doc.Components = &spec3.Components{}
		}
		if doc.Components.Schemas == nil {
			doc.Components.Schemas = map[string]*spec.Schema{}
		}
		base, ok := doc.Components.Schemas[baseRef]
		if !ok {
			return doc, fmt.Errorf("base schema %q not found", baseRef)
		}
		for kind, raw := range kindSchemas {
			ref := fmt.Sprintf("%s.%s", "com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1", kind)
			s := doc.Components.Schemas[ref]
			if s == nil { // first time: clone "Application"
				s = deepCopySchema(base)
				s.Extensions = map[string]interface{}{
					"x-kubernetes-group-version-kind": []interface{}{
						map[string]interface{}{
							"group": "apps.cozystack.io", "version": "v1alpha1", "kind": kind,
						},
					},
				}
				doc.Components.Schemas[ref] = s
			}
			container := findSpecContainer(s)
			if container == nil { // fallback: use the root
				container = s
			}
			if err := patchSpec(container, raw); err != nil {
				return nil, fmt.Errorf("kind %s: %w", kind, err)
			}
		}
		delete(doc.Components.Schemas, baseRef)
		delete(doc.Components.Schemas, baseListRef)

		// Disable strategic-merge-patch+json support in all PATCH operations
		for p, pi := range doc.Paths.Paths {
			if pi == nil || pi.Patch == nil || pi.Patch.RequestBody == nil {
				continue
			}
			delete(pi.Patch.RequestBody.Content, smp)
			doc.Paths.Paths[p] = pi
		}
		return doc, nil
	}
}

// -----------------------------------------------------------------------------
// OpenAPI **v2** (swagger) post-processor
// -----------------------------------------------------------------------------

func buildPostProcessV2(kindSchemas map[string]string) func(*spec.Swagger) (*spec.Swagger, error) {
	return func(sw *spec.Swagger) (*spec.Swagger, error) {
		// Replace the basic "Application" schema with the user-supplied kinds.
		defs := sw.Definitions
		base, ok := defs[baseRef]
		if !ok {
			return sw, fmt.Errorf("base schema %q not found", baseRef)
		}
		for kind, raw := range kindSchemas {
			ref := fmt.Sprintf("%s.%s", "com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1", kind)
			s := deepCopySchema(&base)
			s.Extensions = map[string]interface{}{
				"x-kubernetes-group-version-kind": []interface{}{
					map[string]interface{}{
						"group": "apps.cozystack.io", "version": "v1alpha1", "kind": kind,
					},
				},
			}
			if err := patchSpec(s, raw); err != nil {
				return nil, fmt.Errorf("kind %s: %w", kind, err)
			}
			defs[ref] = *s

			// clone the List variant
			listName := ref + "List"
			listSrc := defs[baseListRef]
			listCopy := deepCopySchema(&listSrc)
			listCopy.Extensions = map[string]interface{}{
				"x-kubernetes-group-version-kind": []interface{}{
					map[string]interface{}{
						"group":   "apps.cozystack.io",
						"version": "v1alpha1",
						"kind":    kind + "List",
					},
				},
			}
			if items := listCopy.Properties["items"]; items.Items != nil && items.Items.Schema != nil {
				items.Items.Schema.Ref = spec.MustCreateRef("#/definitions/" + ref)
				listCopy.Properties["items"] = items
			}
			defs[listName] = *listCopy
		}
		delete(defs, baseRef)
		delete(defs, baseListRef)

		// Disable strategic-merge-patch+json support in all PATCH operations
		for p, op := range sw.Paths.Paths {
			if op.Patch != nil && len(op.Patch.Consumes) > 0 {
				var out []string
				for _, c := range op.Patch.Consumes {
					if c != smp {
						out = append(out, c)
					}
				}
				op.Patch.Consumes = out
				sw.Paths.Paths[p] = op
			}
		}
		return sw, nil
	}
}

View File

@@ -18,8 +18,6 @@ package server
import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
@@ -40,6 +38,8 @@ import (
	utilversionpkg "k8s.io/apiserver/pkg/util/version"
	"k8s.io/component-base/featuregate"
	baseversion "k8s.io/component-base/version"
	"k8s.io/klog/v2"
	"k8s.io/kube-openapi/pkg/validation/spec"
	netutils "k8s.io/utils/net"
)
@@ -159,6 +159,22 @@ func (o AppsServerOptions) Validate(args []string) error {
	return utilerrors.NewAggregate(allErrors)
}

// DeepCopySchema makes a deep copy of a spec.Schema structure
func DeepCopySchema(schema *spec.Schema) (*spec.Schema, error) {
	data, err := json.Marshal(schema)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal schema: %w", err)
	}
	var newSchema spec.Schema
	err = json.Unmarshal(data, &newSchema)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal schema: %w", err)
	}
	return &newSchema, nil
}

// Config returns the configuration for the API server based on AppsServerOptions
func (o *AppsServerOptions) Config() (*apiserver.Config, error) {
	// TODO: set the "real" external address
@@ -179,34 +195,107 @@
	serverConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(
		sampleopenapi.GetOpenAPIDefinitions, openapi.NewDefinitionNamer(apiserver.Scheme),
	)

	version := "0.1"
	if o.ResourceConfig != nil {
		raw, err := json.Marshal(o.ResourceConfig)
		if err != nil {
			return nil, fmt.Errorf("failed to marshal resource config: %v", err)
		}
		sum := sha256.Sum256(raw)
		version = "0.1-" + hex.EncodeToString(sum[:8])
	}

	// capture schemas from config once for fast lookup inside the closure
	kindSchemas := map[string]string{}
	for _, r := range o.ResourceConfig.Resources {
		kindSchemas[r.Application.Kind] = r.Application.OpenAPISchema
	}

	serverConfig.OpenAPIConfig.Info.Title = "Apps"
	serverConfig.OpenAPIConfig.Info.Version = version
	serverConfig.OpenAPIConfig.PostProcessSpec = buildPostProcessV2(kindSchemas)
	serverConfig.OpenAPIConfig.Info.Version = "0.1"
	serverConfig.OpenAPIConfig.PostProcessSpec = func(swagger *spec.Swagger) (*spec.Swagger, error) {
		defs := swagger.Definitions

		// Verify the presence of the base Application/ApplicationList definitions
		appDef, exists := defs["com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.Application"]
		if !exists {
			return swagger, fmt.Errorf("Application definition not found")
		}
		listDef, exists := defs["com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.ApplicationList"]
		if !exists {
			return swagger, fmt.Errorf("ApplicationList definition not found")
		}

		// Iterate over all registered GVKs (e.g., Bucket, Database, etc.)
		for _, gvk := range v1alpha1.RegisteredGVKs {
			// This will be something like:
			// "com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.Bucket"
			resourceName := fmt.Sprintf("com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.%s", gvk.Kind)

			// 1. Create a copy of the base Application definition for the new resource
			newDef, err := DeepCopySchema(&appDef)
			if err != nil {
				return nil, fmt.Errorf("failed to deepcopy schema for %s: %w", gvk.Kind, err)
			}

			// 2. Update x-kubernetes-group-version-kind to match the new resource
			if newDef.Extensions == nil {
				newDef.Extensions = map[string]interface{}{}
			}
			newDef.Extensions["x-kubernetes-group-version-kind"] = []map[string]interface{}{
				{
					"group":   gvk.Group,
					"version": gvk.Version,
					"kind":    gvk.Kind,
				},
			}

			// make `.spec` schemaless so any keys are accepted
			if specProp, ok := newDef.Properties["spec"]; ok {
				specProp.AdditionalProperties = &spec.SchemaOrBool{
					Allows: true,
					Schema: &spec.Schema{},
				}
				newDef.Properties["spec"] = specProp
			}

			// 3. Save the new resource definition under the correct name
			defs[resourceName] = *newDef
			klog.V(6).Infof("PostProcessSpec: Added OpenAPI definition for %s\n", resourceName)

			// 4. Now handle the corresponding List type (e.g., BucketList).
			// We'll start by copying the ApplicationList definition.
			listResourceName := fmt.Sprintf("com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.%sList", gvk.Kind)
			newListDef, err := DeepCopySchema(&listDef)
			if err != nil {
				return nil, fmt.Errorf("failed to deepcopy schema for %sList: %w", gvk.Kind, err)
			}

			// 5. Update x-kubernetes-group-version-kind for the List definition
			if newListDef.Extensions == nil {
				newListDef.Extensions = map[string]interface{}{}
			}
			newListDef.Extensions["x-kubernetes-group-version-kind"] = []map[string]interface{}{
				{
					"group":   gvk.Group,
					"version": gvk.Version,
					"kind":    fmt.Sprintf("%sList", gvk.Kind),
				},
			}

			// 6. IMPORTANT: Fix the "items" reference so it points to the new resource
			// rather than to "Application".
			if itemsProp, found := newListDef.Properties["items"]; found {
				if itemsProp.Items != nil && itemsProp.Items.Schema != nil {
					itemsProp.Items.Schema.Ref = spec.MustCreateRef("#/definitions/" + resourceName)
					newListDef.Properties["items"] = itemsProp
				}
			}

			// 7. Finally, save the new List definition
			defs[listResourceName] = *newListDef
			klog.V(6).Infof("PostProcessSpec: Added OpenAPI definition for %s\n", listResourceName)
		}

		// Remove the original Application/ApplicationList from the definitions
		delete(defs, "com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.Application")
		delete(defs, "com.github.cozystack.cozystack.pkg.apis.apps.v1alpha1.ApplicationList")
		swagger.Definitions = defs
		return swagger, nil
	}

	serverConfig.OpenAPIV3Config = genericapiserver.DefaultOpenAPIV3Config(
		sampleopenapi.GetOpenAPIDefinitions, openapi.NewDefinitionNamer(apiserver.Scheme),
	)
	serverConfig.OpenAPIV3Config.Info.Title = "Apps"
	serverConfig.OpenAPIV3Config.Info.Version = version
	serverConfig.OpenAPIV3Config.PostProcessSpec = buildPostProcessV3(kindSchemas)
	serverConfig.OpenAPIV3Config.Info.Version = "0.1"

	serverConfig.FeatureGate = utilversionpkg.DefaultComponentGlobalsRegistry.FeatureGateFor(
		utilversionpkg.DefaultKubeComponent,
View File

@@ -36,11 +36,10 @@ type Resource struct {
// ApplicationConfig contains the application settings.
type ApplicationConfig struct {
	Kind          string   `yaml:"kind"`
	Singular      string   `yaml:"singular"`
	Plural        string   `yaml:"plural"`
	ShortNames    []string `yaml:"shortNames"`
	OpenAPISchema string   `yaml:"openAPISchema"`
	Kind       string   `yaml:"kind"`
	Singular   string   `yaml:"singular"`
	Plural     string   `yaml:"plural"`
	ShortNames []string `yaml:"shortNames"`
}
// ReleaseConfig contains the release settings.