Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-28 18:18:41 +00:00)

Compare commits: v0.32.0-be...bugfix-fix (14 commits)
| Author | SHA1 | Date |
|---|---|---|
| | b1ed061de9 | |
| | 4479ed5e95 | |
| | b16e73ad42 | |
| | 4631f85114 | |
| | 746641e523 | |
| | 3ce6dbe850 | |
| | 8d5007919f | |
| | 08e569918b | |
| | 6498000721 | |
| | 8486e6b3aa | |
| | 3f6b6798f4 | |
| | c1b928b8ef | |
| | c2e8fba483 | |
| | 75ad26989d | |
.github/workflows/pull-requests.yaml (vendored, 83 changes)
@@ -56,8 +56,8 @@ jobs:
           name: talos-image
           path: _out/assets/nocloud-amd64.raw.xz
 
-  test:
-    name: Test
+  prepare_env:
+    name: Prepare environment
     runs-on: [self-hosted]
     needs: build
 
@@ -66,6 +66,12 @@ jobs:
       !contains(github.event.pull_request.labels.*.name, 'release')
 
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
         with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Download installer
+        uses: actions/download-artifact@v4
+        with:
@@ -78,5 +84,74 @@ jobs:
           name: talos-image
           path: _out/assets/
 
-      - name: Test
-        run: make test
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Prepare environment
+        run: make SANDBOX_NAME=$SANDBOX_NAME prepare-env
+
+  install_cozystack:
+    name: Install Cozystack
+    runs-on: [self-hosted]
+    needs: prepare_env
+
+    # Never run when the PR carries the "release" label.
+    if: |
+      !contains(github.event.pull_request.labels.*.name, 'release')
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Install Cozystack
+        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack
+
+  test_apps:
+    name: Test applications
+    runs-on: [self-hosted]
+    needs: install_cozystack
+
+    # Never run when the PR carries the "release" label.
+    if: |
+      !contains(github.event.pull_request.labels.*.name, 'release')
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: E2E Apps
+        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps
+
+  cleanup:
+    name: Tear down environment
+    runs-on: [self-hosted]
+    needs: test_apps
+
+    # Never run when the PR carries the "release" label.
+    if: |
+      !contains(github.event.pull_request.labels.*.name, 'release')
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: E2E Apps
+        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete
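All four new jobs recompute the same sandbox name, so each stage addresses the sandbox that `prepare_env` created. A minimal sketch of the derivation used in the "Set sandbox ID" step (the repository, workflow, and ref values below are illustrative):

```sh
# Same derivation as the "Set sandbox ID" step; input values are illustrative.
GITHUB_REPOSITORY="cozystack/cozystack"
GITHUB_WORKFLOW="Pull Requests"
GITHUB_REF="refs/pull/123/merge"

echo "cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" \
  | sha256sum | cut -c1-10)"
# -> cozy-e2e-sandbox-<10 hex chars>, stable for a given PR ref
```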
Makefile (4 changes)
@@ -49,6 +49,10 @@ test:
 	make -C packages/core/testing apply
 	make -C packages/core/testing test
 
+prepare-env:
+	make -C packages/core/testing apply
+	make -C packages/core/testing prepare-cluster
+
 generate:
 	hack/update-codegen.sh
 
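The new `prepare-env` target mirrors `test` but stops after cluster provisioning, letting CI split provisioning from the Cozystack install. As the workflow invokes it:

```sh
# As called by the prepare_env CI job:
make SANDBOX_NAME="$SANDBOX_NAME" prepare-env
# equivalent to:
#   make -C packages/core/testing apply
#   make -C packages/core/testing prepare-cluster
```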
@@ -1,9 +1,11 @@
#!/usr/bin/env bats

# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Create tenant with isolated mode enabled" {
  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
@@ -24,6 +26,7 @@ EOF
}

@test "Create a tenant Kubernetes control plane" {
  kubectl -n tenant-test get kuberneteses.apps.cozystack.io test ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
@@ -91,4 +94,260 @@ EOF
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
}

@test "Create a VM Disk" {
  name='test'
  kubectl -n tenant-test get vmdisks.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMDisk
metadata:
  name: $name
  namespace: tenant-test
spec:
  source:
    http:
      url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
  optical: false
  storage: 5Gi
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
  diskName='test'
  name='test'
  kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  running: true
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  disks:
    - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}

@test "Create a Virtual Machine" {
  name='test'
  kubectl -n tenant-test get virtualmachines.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  systemDisk:
    image: ubuntu
    storage: 5Gi
    storageClass: replicated
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}

@test "Create DB PostgreSQL" {
  name='test'
  kubectl -n tenant-test get postgreses.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  postgresql:
    parameters:
      max_connections: 100
  quorum:
    minSyncReplicas: 0
    maxSyncReplicas: 0
  users:
    testuser:
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
}

@test "Create DB MySQL" {
  name='test'
  kubectl -n tenant-test get mysqls.apps.cozystack.io $name ||
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  users:
    testuser:
      maxUserConnections: 1000
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
  kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}

@test "Create DB ClickHouse" {
  name='test'
  kubectl -n tenant-test get clickhouses.apps.cozystack.io $name ||
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
  name: $name
  namespace: tenant-test
spec:
  size: 10Gi
  logStorageSize: 2Gi
  shards: 1
  replicas: 2
  storageClass: ""
  logTTL: 15
  users:
    testuser:
      password: xai7Wepo
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/clickhouse-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
}
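Every test in this file leans on the same polling idiom: `timeout N sh -ec 'until <probe>; do sleep ...; done'` wrapped around a `kubectl get ... -o jsonpath` probe, because `kubectl wait` cannot block on a resource that does not exist yet. A hypothetical helper (not part of this diff) that factors the idiom out might look like:

```sh
# Hypothetical helper: poll a probe command until it succeeds
# or the deadline (in seconds) expires. Mirrors the inline idiom above.
wait_until() {  # wait_until <seconds> <probe command string>
  local deadline=$1; shift
  timeout "$deadline" sh -ec "until $*; do sleep 5; done"
}

# e.g. wait for a Service port to be published:
# wait_until 40 "kubectl -n tenant-test get svc postgres-test-rw \
#   -o jsonpath='{.spec.ports[0].port}' | grep -q 5432"
```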
hack/e2e-install-cozystack.bats (new file, 157 lines)
@@ -0,0 +1,157 @@
#!/usr/bin/env bats

@test "Install Cozystack" {
  # Create namespace & configmap required by installer
  kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
  kubectl create configmap cozystack -n cozy-system \
    --from-literal=bundle-name=paas-full \
    --from-literal=ipv4-pod-cidr=10.244.0.0/16 \
    --from-literal=ipv4-pod-gateway=10.244.0.1 \
    --from-literal=ipv4-svc-cidr=10.96.0.0/16 \
    --from-literal=ipv4-join-cidr=100.64.0.0/16 \
    --from-literal=root-host=example.org \
    --from-literal=api-server-endpoint=https://192.168.123.10:6443 \
    --dry-run=client -o yaml | kubectl apply -f -

  # Apply installer manifests from file
  kubectl apply -f _out/assets/cozystack-installer.yaml

  # Wait for the installer deployment to become available
  kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available

  # Wait until HelmReleases appear & reconcile them
  timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
  sleep 5
  kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex

  # Fail the test if any HelmRelease is not Ready
  if kubectl get hr -A | grep -v " True " | grep -v NAME; then
    kubectl get hr -A
    fail "Some HelmReleases failed to reconcile"
  fi
}

@test "Wait for Cluster-API provider deployments" {
  # Wait for Cluster-API provider deployments
  timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
}

@test "Wait for LINSTOR and configure storage" {
  # Linstor controller and nodes
  kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
  timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'

  for node in srv1 srv2 srv3; do
    kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
  done

  # Storage classes
  kubectl apply -f - <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: Immediate
allowVolumeExpansion: true
EOF
}

@test "Wait for MetalLB and configure address pool" {
  # MetalLB address pool
  kubectl apply -f - <<'EOF'
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools: [cozystack]
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses: [192.168.123.200-192.168.123.250]
  autoAssign: true
  avoidBuggyIPs: false
EOF
}

@test "Check Cozystack API service" {
  kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}

@test "Configure Tenant and wait for applications" {
  # Patch root tenant and wait for its releases
  kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'

  timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready

  if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
    flux reconcile hr monitoring -n tenant-root --force
    kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
  fi

  # Expose Cozystack services through ingress
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'

  # NGINX ingress controller
  timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available

  # etcd statefulset
  kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m

  # VictoriaMetrics components
  kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m

  # Grafana
  kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
  kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m

  # Verify Grafana via ingress
  ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
    echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
    exit 1
  fi
}

@test "Keycloak OIDC stack is healthy" {
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'

  timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}
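The `awk` pipeline in the first test fans out one backgrounded `kubectl wait` per HelmRelease and then blocks on all of them, so a single slow release does not serialize the rest. Expanded by hand, the script it pipes into `sh -ex` has roughly this shape (release names are illustrative):

```sh
# What the awk pipeline emits for `sh -ex` (names are illustrative):
kubectl wait --timeout=15m --for=condition=ready -n cozy-system hr/cozystack &
kubectl wait --timeout=15m --for=condition=ready -n cozy-linstor hr/linstor &
kubectl wait --timeout=15m --for=condition=ready -n cozy-metallb hr/metallb &
wait   # block until every backgrounded kubectl wait has returned
```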
hack/e2e-prepare-cluster.bats (new file, 235 lines)
@@ -0,0 +1,235 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Required installer assets exist" {
  if [ ! -f _out/assets/cozystack-installer.yaml ]; then
    echo "Missing: _out/assets/cozystack-installer.yaml" >&2
    exit 1
  fi

  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi
}

@test "IPv4 forwarding is enabled" {
  if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
    echo "IPv4 forwarding is disabled!" >&2
    echo >&2
    echo "Enable it with:" >&2
    echo "  echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
    exit 1
  fi
}

@test "Clean previous VMs" {
  kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
  rm -rf srv1 srv2 srv3
}

@test "Prepare networking and masquerading" {
  ip link del cozy-br0 2>/dev/null || true
  ip link add cozy-br0 type bridge
  ip link set cozy-br0 up
  ip address add 192.168.123.1/24 dev cozy-br0

  # Masquerading rule - idempotent (delete first, then add)
  iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
  iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}

@test "Prepare cloud-init drive for VMs" {
  mkdir -p srv1 srv2 srv3

  # Generate cloud-init ISOs
  for i in 1 2 3; do
    echo "hostname: srv${i}" > "srv${i}/meta-data"

    cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF

    cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1${i}/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOF

    ( cd "srv${i}" && genisoimage \
        -output seed.img \
        -volid cidata -rational-rock -joliet \
        user-data meta-data network-config )
  done
}

@test "Use Talos NoCloud image from assets" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi

  rm -f nocloud-amd64.raw
  cp _out/assets/nocloud-amd64.raw.xz .
  xz --decompress nocloud-amd64.raw.xz
}

@test "Prepare VM disks" {
  for i in 1 2 3; do
    cp nocloud-amd64.raw srv${i}/system.img
    qemu-img resize srv${i}/system.img 50G
    qemu-img create srv${i}/data.img 100G
  done
}

@test "Create tap devices" {
  for i in 1 2 3; do
    ip link del cozy-srv${i} 2>/dev/null || true
    ip tuntap add dev cozy-srv${i} mode tap
    ip link set cozy-srv${i} up
    ip link set cozy-srv${i} master cozy-br0
  done
}

@test "Boot QEMU VMs" {
  for i in 1 2 3; do
    qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
      -device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
      -netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
      -drive file=srv${i}/system.img,if=virtio,format=raw \
      -drive file=srv${i}/seed.img,if=virtio,format=raw \
      -drive file=srv${i}/data.img,if=virtio,format=raw \
      -display none -daemonize -pidfile srv${i}/qemu.pid
  done

  # Give qemu a few seconds to start up networking
  sleep 5
}

@test "Wait until Talos API port 50000 is reachable on all machines" {
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Generate Talos cluster configuration" {
  # Cluster-wide patches
  cat > patch.yaml <<'EOF'
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOF

  # Control-plane-only patches
  cat > patch-controlplane.yaml <<'EOF'
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOF

  # Generate secrets once
  if [ ! -f secrets.yaml ]; then
    talosctl gen secrets
  fi

  rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
  talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
    --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}

@test "Apply Talos configuration to the node" {
  # Apply the configuration to all three nodes
  for node in 11 12 13; do
    talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
  done

  # Wait for Talos services to come up again
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Bootstrap Talos cluster" {
  # Bootstrap etcd on the first node
  timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

  # Wait until etcd is healthy
  timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
  timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'

  # Retrieve kubeconfig
  rm -f kubeconfig
  talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10

  # Wait until all three nodes register in Kubernetes
  timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}
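After bootstrap, the generated `talosconfig` and `kubeconfig` in the working directory are what the later stages use. A quick manual smoke check of the freshly provisioned trio might look like this (a sketch; standard talosctl/kubectl flags, with the VIP at 192.168.123.10 as the endpoint):

```sh
# Point talosctl at the generated config and the VIP, then spot-check a node.
talosctl --talosconfig talosconfig -e 192.168.123.10 -n 192.168.123.11 version
talosctl --talosconfig talosconfig -e 192.168.123.10 \
  -n 192.168.123.11,192.168.123.12,192.168.123.13 etcd members

KUBECONFIG=kubeconfig kubectl get nodes -o wide   # expect 3 Ready nodes
```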
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.5.1@sha256:b7633717cd7449c0042ae92d8ca9b36e4d69566561f5c7d44e21058e7d05c6d5
+ghcr.io/cozystack/cozystack/nginx-cache:0.5.1@sha256:50ac1581e3100bd6c477a71161cb455a341ffaf9e5e2f6086802e4e25271e8af
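Several of the following one-line changes only move a pinned digest while keeping the tag. To confirm which digest a tag currently resolves to, one option is `crane` from go-containerregistry (an assumption; any registry client that can resolve digests works):

```sh
# Resolve the digest behind a tag (crane is from go-containerregistry):
crane digest ghcr.io/cozystack/cozystack/nginx-cache:0.5.1

# Or with docker:
docker buildx imagetools inspect ghcr.io/cozystack/cozystack/nginx-cache:0.5.1
```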
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.24.0
+version: 0.24.1
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -110,16 +110,16 @@ See the reference for components utilized in this service:
 
 ### Kubernetes Control Plane Configuration
 
-| Name                                               | Description                                                                   | Value   |
-| -------------------------------------------------- | ----------------------------------------------------------------------------- | ------- |
-| `controlPlane.apiServer.resources`                 | Explicit CPU/memory resource requests and limits for the API server.         | `{}`    |
-| `controlPlane.apiServer.resourcesPreset`           | Use a common resources preset when `resources` is not set explicitly.        | `small` |
-| `controlPlane.controllerManager.resources`         | Explicit CPU/memory resource requests and limits for the controller manager. | `{}`    |
-| `controlPlane.controllerManager.resourcesPreset`   | Use a common resources preset when `resources` is not set explicitly.        | `micro` |
-| `controlPlane.scheduler.resources`                 | Explicit CPU/memory resource requests and limits for the scheduler.          | `{}`    |
-| `controlPlane.scheduler.resourcesPreset`           | Use a common resources preset when `resources` is not set explicitly.        | `micro` |
-| `controlPlane.konnectivity.server.resources`       | Explicit CPU/memory resource requests and limits for the Konnectivity.       | `{}`    |
-| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly.        | `micro` |
+| Name                                               | Description                                                                   | Value    |
+| -------------------------------------------------- | ----------------------------------------------------------------------------- | -------- |
+| `controlPlane.apiServer.resources`                 | Explicit CPU/memory resource requests and limits for the API server.         | `{}`     |
+| `controlPlane.apiServer.resourcesPreset`           | Use a common resources preset when `resources` is not set explicitly.        | `medium` |
+| `controlPlane.controllerManager.resources`         | Explicit CPU/memory resource requests and limits for the controller manager. | `{}`     |
+| `controlPlane.controllerManager.resourcesPreset`   | Use a common resources preset when `resources` is not set explicitly.        | `micro`  |
+| `controlPlane.scheduler.resources`                 | Explicit CPU/memory resource requests and limits for the scheduler.          | `{}`     |
+| `controlPlane.scheduler.resourcesPreset`           | Use a common resources preset when `resources` is not set explicitly.        | `micro`  |
+| `controlPlane.konnectivity.server.resources`       | Explicit CPU/memory resource requests and limits for the Konnectivity.       | `{}`     |
+| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly.        | `micro`  |
 
 In production environments, it's recommended to set `resources` explicitly.
 Example of `controlPlane.*.resources`:
@@ -24,8 +24,8 @@ spec:
     secretRef:
       name: {{ .Release.Name }}-admin-kubeconfig
       key: super-admin.svc
-  targetNamespace: cozy-monitoring-agents
-  storageNamespace: cozy-monitoring-agents
+  targetNamespace: cozy-monitoring
+  storageNamespace: cozy-monitoring
   install:
     createNamespace: true
     timeout: "300s"
@@ -26,7 +26,7 @@
     "resourcesPreset": {
       "type": "string",
       "description": "Use a common resources preset when `resources` is not set explicitly.",
-      "default": "small",
+      "default": "medium",
       "enum": [
         "none",
         "nano",
@@ -123,7 +123,7 @@ controlPlane:
     ## cpu: 100m
     ## memory: 512Mi
     ##
-    resourcesPreset: "small"
+    resourcesPreset: "medium"
     resources: {}
 
   controllerManager:
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.14.0
+version: 0.14.1
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1,2 +1,2 @@
-FROM alpine:3.20
-RUN apk add --no-cache postgresql16-client uuidgen restic
+FROM alpine:3.22
+RUN apk add --no-cache postgresql17-client uuidgen restic
@@ -45,38 +45,8 @@ kafka 0.5.0 93bdf411
 kafka 0.6.0 6130f43d
 kafka 0.6.1 632224a3
 kafka 0.7.0 HEAD
-kubernetes 0.1.0 263e47be
-kubernetes 0.2.0 53f2365e
-kubernetes 0.3.0 007d414f
-kubernetes 0.4.0 d7cfa53c
-kubernetes 0.5.0 dfbc210b
-kubernetes 0.6.0 5bbc488e
-kubernetes 0.7.0 e9716091
-kubernetes 0.8.0 ac11056e
-kubernetes 0.8.1 366bcafc
-kubernetes 0.8.2 f81be075
-kubernetes 0.9.0 6c5cf5bf
-kubernetes 0.10.0 b8e33d19
-kubernetes 0.11.0 4b90bf5a
-kubernetes 0.11.1 5fb9cfe3
-kubernetes 0.12.0 bb985806
-kubernetes 0.12.1 28fca4ef
-kubernetes 0.13.0 1ec10165
-kubernetes 0.14.0 bfbde07c
-kubernetes 0.14.1 898374b5
-kubernetes 0.15.0 4e68e65c
-kubernetes 0.15.1 160e4e2a
-kubernetes 0.15.2 8267072d
-kubernetes 0.16.0 077045b0
-kubernetes 0.17.0 1fbbfcd0
-kubernetes 0.17.1 fd240701
-kubernetes 0.18.0 721c12a7
-kubernetes 0.19.0 93bdf411
-kubernetes 0.20.0 609e7ede
-kubernetes 0.20.1 f9f8bb2f
-kubernetes 0.21.0 6130f43d
-kubernetes 0.23.1 632224a3
-kubernetes 0.24.0 HEAD
+kubernetes 0.24.0 62cb694d
+kubernetes 0.24.1 HEAD
 mysql 0.1.0 263e47be
 mysql 0.2.0 c24a103f
 mysql 0.3.0 53f2365e
@@ -117,7 +87,8 @@ postgres 0.10.1 93bdf411
 postgres 0.11.0 f9f8bb2f
 postgres 0.12.0 6130f43d
 postgres 0.12.1 632224a3
-postgres 0.14.0 HEAD
+postgres 0.14.0 62cb694d
+postgres 0.14.1 HEAD
 rabbitmq 0.1.0 263e47be
 rabbitmq 0.2.0 53f2365e
 rabbitmq 0.3.0 6c5cf5bf
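The versions_map file maps each released chart version to the commit it was cut from, with `HEAD` marking the version still in development; releasing 0.24.1 pins the previous 0.24.0 row to its commit. A quick lookup sketch (assuming the file sits in the current directory):

```sh
# Which commit does kubernetes chart 0.24.0 correspond to?
awk '$1 == "kubernetes" && $2 == "0.24.0" { print $3 }' versions_map
# -> 62cb694d
```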
@@ -1,2 +1,2 @@
 cozystack:
-  image: ghcr.io/cozystack/cozystack/installer:v0.32.0-beta.2@sha256:469b5590e5fa168d7d8441ef6f8b62488e2518baa29a219e6e595eaf2a148923
+  image: ghcr.io/cozystack/cozystack/installer:v0.32.0@sha256:981f1a073fa654f878e448ea89ef324f50d2479f27d3228449e8b66fda7c567f
@@ -32,6 +32,14 @@ image-e2e-sandbox:
 
 test: test-cluster test-apps ## Run the end-to-end tests in existing sandbox
 
+prepare-cluster:
+	docker cp ../../../_out/assets/cozystack-installer.yaml "${SANDBOX_NAME}":/workspace/_out/assets/cozystack-installer.yaml
+	docker cp ../../../_out/assets/nocloud-amd64.raw.xz "${SANDBOX_NAME}":/workspace/_out/assets/nocloud-amd64.raw.xz
+	docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-prepare-cluster.bats'
+
+install-cozystack:
+	docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-install-cozystack.bats'
+
 test-cluster: ## Run the end-to-end for creating a cluster
 	docker cp ../../../_out/assets/cozystack-installer.yaml "${SANDBOX_NAME}":/workspace/_out/assets/cozystack-installer.yaml
 	docker cp ../../../_out/assets/nocloud-amd64.raw.xz "${SANDBOX_NAME}":/workspace/_out/assets/nocloud-amd64.raw.xz
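These targets are what the new workflow jobs call, one per stage, all against the same named sandbox container. Run end to end by hand, the sequence looks roughly like this (the sandbox name suffix is illustrative):

```sh
SANDBOX_NAME=cozy-e2e-sandbox-0123456789

make -C packages/core/testing SANDBOX_NAME="$SANDBOX_NAME" prepare-cluster    # Talos VMs up
make -C packages/core/testing SANDBOX_NAME="$SANDBOX_NAME" install-cozystack  # platform install
make -C packages/core/testing SANDBOX_NAME="$SANDBOX_NAME" test-apps          # app E2E
make -C packages/core/testing SANDBOX_NAME="$SANDBOX_NAME" delete             # tear down
```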
@@ -1,2 +1,2 @@
 e2e:
-  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.32.0-beta.2@sha256:aa4dc39f4e5de85a2a22b09e53503dd70bd73a0b0fbd5853858a4dd793d5efb8
+  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.32.0@sha256:454d5a01c30685ca451a6cd42bda5f4c1d80195642f9dd8ccf09369932ebb531
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/matchbox:v0.32.0-beta.2@sha256:c10b3da45a6302c824158747ecd32c9b61ac1df8bdfd28a7a20157f720f0f938
+ghcr.io/cozystack/cozystack/matchbox:v0.32.0@sha256:1c5173f0c368dd14e29dae95c3d576574af63c226b6f554c78d05c5f160084b5
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:fb3ecad5d2969100e632ca15e657e06dd11f345b833f16a16f7f1a3237865667
+ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:17c867e1576da57bdee58142fa2b5d5fe5e5acb0a79322fbb0fb6e8723fad0d2
@@ -1,2 +1,2 @@
 cozystackAPI:
-  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.32.0-beta.2@sha256:d9bee0e9f73a950784e43d907552c21044d01eed728e1185455308e49d00c00d
+  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.32.0@sha256:d9bee0e9f73a950784e43d907552c21044d01eed728e1185455308e49d00c00d
@@ -1,5 +1,5 @@
 cozystackController:
-  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.32.0-beta.2@sha256:a1fceb277007846bc85ceee0afd1f5d1122496174203c718c1275a1038cb07f6
+  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.32.0@sha256:a1fceb277007846bc85ceee0afd1f5d1122496174203c718c1275a1038cb07f6
   debug: false
   disableTelemetry: false
-cozystackVersion: "v0.32.0-beta.2"
+cozystackVersion: "v0.32.0"
@@ -76,7 +76,7 @@ data:
     "kubeappsNamespace": {{ .Release.Namespace | quote }},
     "helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
     "carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
-    "appVersion": "v0.32.0-beta.2",
+    "appVersion": "v0.32.0",
     "authProxyEnabled": {{ .Values.authProxy.enabled }},
     "oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
     "oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},
@@ -19,7 +19,7 @@ kubeapps:
   image:
     registry: ghcr.io/cozystack/cozystack
     repository: dashboard
-    tag: v0.32.0-beta.2
+    tag: v0.32.0
     digest: "sha256:5e514516bd3dc0c693bb346ddeb9740e0439a59deb2a56b87317286e3ce79ac9"
   redis:
     master:
@@ -37,8 +37,8 @@ kubeapps:
     image:
       registry: ghcr.io/cozystack/cozystack
       repository: kubeapps-apis
-      tag: v0.32.0-beta.2
-      digest: "sha256:8924789687c6f612293904a774de2596d8c69dc281142576b1467d4f74d050da"
+      tag: v0.32.0
+      digest: "sha256:8ab96c9cd4f0c5452565f2ca1b7e1b644b112e534dd31c0fcef623ec3054d21e"
     pluginConfig:
       flux:
         packages:
@@ -3,7 +3,7 @@ kamaji:
   deploy: false
   image:
     pullPolicy: IfNotPresent
-    tag: v0.32.0-beta.2@sha256:39bd83f5e7dae9c462f25d3aaef4240749dc380469062b9184727d5b12d0d584
+    tag: v0.32.0@sha256:39bd83f5e7dae9c462f25d3aaef4240749dc380469062b9184727d5b12d0d584
     repository: ghcr.io/cozystack/cozystack/kamaji
   resources:
     limits:
@@ -1,3 +1,3 @@
 portSecurity: true
 routes: ""
-image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.32.0-beta.2@sha256:1ca9ee2675fe4ed52867f1483e9f39255533a86d15a44218f4e80a23bfe5db04
+image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.32.0@sha256:7a66fdc3ddad2a0d09b3ae5bed33761d932f7ee3226a68fc3d726b5abb6c6c49
@@ -64,4 +64,4 @@ global:
   images:
     kubeovn:
       repository: kubeovn
-      tag: v1.13.13@sha256:931eb2a6f8f56f8fdcf2fadc6b2bab5fa67baf58cc06fe8bc2d6f6fcc3f23c01
+      tag: v1.13.13@sha256:66de00bba07798e21ba980f02c85685a7a40a92c1be0491d7fd05aefb04ae9fd
scripts/migrations/14 (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/sh
# Migration 14 --> 15

# Delete the `capi-providers` HelmRelease in the `cozy-cluster-api` namespace if present
if kubectl get hr -n cozy-cluster-api capi-providers >/dev/null 2>&1; then
  kubectl delete hr -n cozy-cluster-api capi-providers
fi

# Write version to cozystack-version config
kubectl create configmap -n cozy-system cozystack-version --from-literal=version=15 --dry-run=client -o yaml | kubectl apply -f-
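Each migration script ends by stamping the cluster with the next schema version in the `cozystack-version` ConfigMap, which is how the platform decides which migrations still need to run. Checking the current stamp by hand (ConfigMap name and key taken from the scripts above):

```sh
# Current migration version of a cluster:
kubectl get configmap -n cozy-system cozystack-version -o jsonpath='{.data.version}'; echo
# A cluster stamped "15" would run scripts/migrations/15 next.
```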
scripts/migrations/15 (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/sh
# Migration 15 --> 16

if kubectl get validatingwebhookconfigurations.admissionregistration.k8s.io kamaji-validating-webhook-configuration; then
  kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io kamaji-validating-webhook-configuration
fi
kubectl get kamajicontrolplane -A -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,VERSION:.spec.version,VERSION:.status.version --no-headers |
while read namespace name version status; do
  if [ "$status" = "v1.32.4" ]; then
    continue
  fi
  (set -x; kubectl patch kamajicontrolplane "$name" -n "$namespace" --type merge -p '{"spec":{"version":"1.32.4"}}')
  (set -x; kubectl patch kamajicontrolplane "$name" -n "$namespace" --type merge -p '{"status":{"version":"v1.32.4"}}' --subresource status)
  (set -x; kubectl patch tcp "$name" -n "$namespace" --type merge -p '{"spec":{"kubernetes":{"version":"1.32.4"}}}')
  (set -x; kubectl patch tcp "$name" -n "$namespace" --type merge -p '{"status":{"kubernetesResources":{"version":{"version":"v1.32.4"}}}}' --subresource status)
done

# Upgrade kubernetes.apps to new chart version
kubectl get kuberneteses.apps.cozystack.io -A --no-headers --output=custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name' | while read NAMESPACE NAME; do
  kubectl patch kuberneteses.apps.cozystack.io -n "$NAMESPACE" "$NAME" --type merge -p '{"appVersion":"0.24.0"}'
done

if kubectl get helmrelease kamaji -n cozy-kamaji; then
  cozypkg reconcile kamaji -n cozy-kamaji --force
fi

# Write version to cozystack-version config
kubectl create configmap -n cozy-system cozystack-version --from-literal=version=16 --dry-run=client -o yaml | kubectl apply -f-
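Because the loop patches every KamajiControlPlane whose status is not already v1.32.4, the migration can be previewed read-only before running it. A sketch of such a dry pass (same custom-columns query as the script uses):

```sh
# List control planes that migration 15 would patch (status not yet v1.32.4):
kubectl get kamajicontrolplane -A --no-headers \
  -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,STATUS:.status.version |
awk '$3 != "v1.32.4" { print $1 "/" $2 " -> needs patch (currently " $3 ")" }'
```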