Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-27 10:18:39 +00:00)

[ci] Refactor Github workflows

Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
.github/workflows/pull-requests-release.yaml (vendored, 221 lines changed)
@@ -1,228 +1,15 @@
name: Releasing PR
name: "Releasing PR"

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened, closed]
    types: [closed]

# Cancel in‑flight runs for the same PR when a new push arrives.
concurrency:
  group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  prepare_env:
    name: Prepare environment
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    if: |
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Extract tag from PR branch
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }
            const tag = `v${m[1]}`;
            core.setOutput('tag', tag);
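For reference, the same branch-to-tag mapping can be sketched in plain shell (illustrative only; the workflow itself uses actions/github-script as shown above, and this simplified check does not validate the full X.Y.Z pattern):

    # Hypothetical example: derive the release tag from a PR head branch name.
    branch="release-1.2.3-rc1"
    case "$branch" in
      release-*) tag="v${branch#release-}" ;;   # -> v1.2.3-rc1
      *) echo "branch '$branch' does not look like 'release-X.Y.Z[-suffix]'" >&2; exit 1 ;;
    esac
    echo "$tag"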
      - name: Find draft release and get asset IDs
        id: fetch_assets
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GH_PAT }}
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo,
              per_page: 100
            });
            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!draft) {
              core.setFailed(`Draft release '${tag}' not found`);
              return;
            }
            const findAssetId = (name) =>
              draft.assets.find(a => a.name === name)?.id;
            const installerId = findAssetId("cozystack-installer.yaml");
            const diskId = findAssetId("nocloud-amd64.raw.xz");
            if (!installerId || !diskId) {
              core.setFailed("Missing required assets");
              return;
            }
            core.setOutput("installer_id", installerId);
            core.setOutput("disk_id", diskId);

      - name: Download assets from GitHub API
        run: |
          mkdir -p _out/assets
          curl -sSL \
            -H "Authorization: token ${GH_PAT}" \
            -H "Accept: application/octet-stream" \
            -o _out/assets/cozystack-installer.yaml \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.installer_id }}"
          curl -sSL \
            -H "Authorization: token ${GH_PAT}" \
            -H "Accept: application/octet-stream" \
            -o _out/assets/nocloud-amd64.raw.xz \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.disk_id }}"
        env:
          GH_PAT: ${{ secrets.GH_PAT }}

      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
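The sandbox name is derived deterministically, so every job in the same workflow run lands on the same sandbox without passing state between jobs. A minimal sketch of the naming scheme (the variable values and the resulting suffix are hypothetical):

    # Same repo + workflow + ref always hash to the same 10-character suffix.
    GITHUB_REPOSITORY="cozystack/cozystack"
    GITHUB_WORKFLOW="Releasing PR"
    GITHUB_REF="refs/pull/123/merge"
    suffix=$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)
    echo "cozy-e2e-sandbox-${suffix}"   # e.g. cozy-e2e-sandbox-3f1a9c0d2e (illustrative)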
      - name: Prepare workspace
        run: |
          cd ..
          rm -rf /tmp/$SANDBOX_NAME
          cp -r cozystack /tmp/$SANDBOX_NAME
          sudo systemctl stop "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl daemon-reexec
          sudo systemd-run \
            --on-calendar="$(date -d 'now + 24 hours' '+%Y-%m-%d %H:%M:%S')" \
            --unit=rm-workspace-$SANDBOX_NAME \
            rm -rf /tmp/$SANDBOX_NAME

      - name: Delete sandbox in 24h
        run: |
          sudo systemctl stop "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl daemon-reexec
          sudo systemd-run \
            --on-calendar="$(date -d 'now + 24 hours' '+%Y-%m-%d %H:%M:%S')" \
            --unit=teardown-$SANDBOX_NAME \
            /usr/bin/docker rm -f $SANDBOX_NAME
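Each systemd-run call above registers a transient one-shot timer on the self-hosted runner, so the workspace and the sandbox container are garbage-collected even if the cleanup job never runs. A hedged illustration of how such a transient unit could be inspected or cancelled by hand (the unit name is hypothetical):

    # List the transient timers created for sandboxes on this runner.
    systemctl list-timers 'teardown-cozy-e2e-sandbox-*'
    # Cancel one manually if the sandbox was already removed.
    sudo systemctl stop teardown-cozy-e2e-sandbox-3f1a9c0d2e.timer
    sudo systemctl reset-failed teardown-cozy-e2e-sandbox-3f1a9c0d2e.service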
      - name: Prepare environment
        run: |
          cd /tmp/$SANDBOX_NAME
          make SANDBOX_NAME=$SANDBOX_NAME prepare-env

  install_cozystack:
    name: Install Cozystack
    runs-on: [self-hosted]
    needs: prepare_env
    permissions:
      contents: read
      packages: write
    if: |
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: Install Cozystack
        run: |
          cd /tmp/$SANDBOX_NAME
          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack

  setup_tenant:
    name: Setup tenant-test
    runs-on: [self-hosted]
    needs: install_cozystack
    permissions:
      contents: read
      packages: write
    if: |
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: E2E Apps
        run: |
          cd /tmp/$SANDBOX_NAME
          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-tenant

  test_apps:
    strategy:
      matrix:
        app: [clickhouse,kubernetes,mysql,postgres,virtualmachine,vminstance]
    name: Test ${{ matrix.app }}
    runs-on: [self-hosted]
    needs: setup_tenant
    permissions:
      contents: read
      packages: write
    if: |
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: E2E Apps
        run: |
          cd /tmp/$SANDBOX_NAME
          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}

  cleanup:
    name: Tear down environment
    runs-on: [self-hosted]
    needs: test_apps
    permissions:
      contents: read
      packages: write
    if: |
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: Tear down sandbox
        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete

      - name: Remove workspace
        run: rm -rf /tmp/$SANDBOX_NAME

      - name: Tear down timers
        run: |
          sudo systemctl stop "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl stop "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl daemon-reexec

  finalize:
    name: Finalize Release
    runs-on: [self-hosted]
.github/workflows/pull-requests.yaml (vendored, 178 lines changed)
@@ -4,8 +4,9 @@ on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

# Cancel in‑flight runs for the same PR when a new push arrives.
concurrency:
  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
@@ -55,35 +56,96 @@ jobs:
        with:
          name: talos-image
          path: _out/assets/nocloud-amd64.raw.xz

  prepare_env:
    name: Prepare environment
    runs-on: [self-hosted]
    needs: build

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')
  resolve_assets:
    name: "Resolve assets"
    runs-on: ubuntu-latest
    if: contains(github.event.pull_request.labels.*.name, 'release')
    outputs:
      installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
      disk_id: ${{ steps.fetch_assets.outputs.disk_id }}

    steps:
      - name: Checkout code
        if: contains(github.event.pull_request.labels.*.name, 'release')
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Download installer
        uses: actions/download-artifact@v4
      - name: Extract tag from PR branch (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        id: get_tag
        uses: actions/github-script@v7
        with:
          name: cozystack-installer
          path: _out/assets/
          script: |
            const branch = context.payload.pull_request.head.ref;
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }
            core.setOutput('tag', `v${m[1]}`);

      - name: Download Talos image
      - name: Find draft release & asset IDs (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        id: fetch_assets
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GH_PAT }}
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo,
              per_page: 100
            });
            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!draft) {
              core.setFailed(`Draft release '${tag}' not found`);
              return;
            }
            const find = (n) => draft.assets.find(a => a.name === n)?.id;
            const installerId = find('cozystack-installer.yaml');
            const diskId = find('nocloud-amd64.raw.xz');
            if (!installerId || !diskId) {
              core.setFailed('Required assets missing in draft release');
              return;
            }
            core.setOutput('installer_id', installerId);
            core.setOutput('disk_id', diskId);
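The same draft-release lookup can be reproduced from a shell when debugging (a rough sketch; the repository name, tag, and GH_PAT variable are placeholders, and the token must be allowed to see draft releases):

    # List releases (drafts included when authorized), pick the draft for the tag,
    # then print the numeric IDs of the two required assets.
    repo="owner/cozystack"; tag="v0.0.0-example"
    curl -sSL -H "Authorization: token ${GH_PAT}" \
      "https://api.github.com/repos/${repo}/releases?per_page=100" |
      jq --arg tag "$tag" '.[] | select(.tag_name == $tag and .draft) |
        {installer_id: (.assets[] | select(.name == "cozystack-installer.yaml") | .id),
         disk_id:      (.assets[] | select(.name == "nocloud-amd64.raw.xz") | .id)}'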
  prepare_env:
    name: "Prepare environment"
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: read
    needs: ["build", "resolve_assets"]
    if: ${{ always() && (needs.build.result == 'success' || needs.resolve_assets.result == 'success') }}

    steps:
      # ▸ Regular PR path – download artefacts produced by the *build* job
      - name: "Download Talos image (regular PR)"
        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
        uses: actions/download-artifact@v4
        with:
          name: talos-image
          path: _out/assets/
          path: _out/assets

      # ▸ Release PR path – fetch artefacts from the corresponding draft release
      - name: Download assets from draft release (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        run: |
          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
            -o _out/assets/nocloud-amd64.raw.xz \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
        env:
          GH_PAT: ${{ secrets.GH_PAT }}

      # ▸ Start actual job steps
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
@@ -100,69 +162,72 @@ jobs:
            --unit=rm-workspace-$SANDBOX_NAME \
            rm -rf /tmp/$SANDBOX_NAME

      - name: Delete sandbox in 24h
        run: |
          sudo systemctl stop "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl daemon-reexec
          sudo systemd-run \
            --on-calendar="$(date -d 'now + 24 hours' '+%Y-%m-%d %H:%M:%S')" \
            --unit=teardown-$SANDBOX_NAME \
            /usr/bin/docker rm -f $SANDBOX_NAME

      - name: Prepare environment
        run: |
          cd /tmp/$SANDBOX_NAME
          make SANDBOX_NAME=$SANDBOX_NAME prepare-env

  install_cozystack:
    name: Install Cozystack
    name: "Install Cozystack"
    runs-on: [self-hosted]
    needs: prepare_env

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')
    permissions:
      contents: read
      packages: read
    needs: ["prepare_env", "resolve_assets"]
    if: ${{ always() && needs.prepare_env.result == 'success' }}

    steps:
      - name: Prepare _out/assets directory
        run: mkdir -p _out/assets

      # ▸ Regular PR path – download artefacts produced by the *build* job
      - name: "Download installer (regular PR)"
        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
        uses: actions/download-artifact@v4
        with:
          name: cozystack-installer
          path: _out/assets

      # ▸ Release PR path – fetch artefacts from the corresponding draft release
      - name: Download assets from draft release (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        run: |
          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
            -o _out/assets/cozystack-installer.yaml \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
        env:
          GH_PAT: ${{ secrets.GH_PAT }}

      # ▸ Start actual job steps
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: Install Cozystack
      - name: Install Cozystack into sandbox
        run: |
          cd /tmp/$SANDBOX_NAME
          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack

  setup_tenant:
    name: Setup tenant-test
    runs-on: [self-hosted]
    needs: install_cozystack

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')
  detect_test_matrix:
    name: "Detect e2e test matrix"
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set.outputs.matrix }}

    steps:
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: E2E Apps
      - uses: actions/checkout@v4
      - id: set
        run: |
          cd /tmp/$SANDBOX_NAME
          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-tenant

          apps=$(find hack/e2e-apps -maxdepth 1 -mindepth 1 -name '*.bats' | \
            awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
          echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
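The detect_test_matrix step turns the files under hack/e2e-apps/ into a JSON matrix consumed by test_apps. A minimal illustration of the same pipeline, assuming three hypothetical test files:

    # Given hack/e2e-apps/{kubernetes,mysql,postgres}.bats (hypothetical listing):
    apps=$(printf '%s\n' hack/e2e-apps/kubernetes.bats hack/e2e-apps/mysql.bats hack/e2e-apps/postgres.bats |
      awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
    echo "matrix={\"app\":$apps}"
    # -> matrix={"app":["kubernetes","mysql","postgres"]}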
  test_apps:
    strategy:
      matrix:
        app: [clickhouse,kafka,kubernetes,mysql,postgres,redis,virtualmachine,vminstance]
      matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
    name: Test ${{ matrix.app }}
    runs-on: [self-hosted]
    needs: setup_tenant

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')
    needs: [install_cozystack,detect_test_matrix]
    if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}

    steps:
      - name: Set sandbox ID
@@ -177,10 +242,7 @@ jobs:
    name: Tear down environment
    runs-on: [self-hosted]
    needs: test_apps

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')
    if: ${{ always() && needs.test_apps.result == 'success' }}

    steps:
      - name: Checkout code
@@ -1,353 +0,0 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Cozystack end‑to‑end provisioning test (Bats)
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@test "Create tenant with isolated mode enabled" {
|
||||
kubectl -n tenant-root get tenants.apps.cozystack.io test ||
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: test
|
||||
namespace: tenant-root
|
||||
spec:
|
||||
etcd: false
|
||||
host: ""
|
||||
ingress: false
|
||||
isolated: true
|
||||
monitoring: false
|
||||
resourceQuotas: {}
|
||||
seaweedfs: false
|
||||
EOF
|
||||
kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
|
||||
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
|
||||
}
|
||||
|
||||
@test "Create a tenant Kubernetes control plane" {
|
||||
kubectl -n tenant-test get kuberneteses.apps.cozystack.io test ||
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Kubernetes
|
||||
metadata:
|
||||
name: test
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
addons:
|
||||
certManager:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
cilium:
|
||||
valuesOverride: {}
|
||||
fluxcd:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
gatewayAPI:
|
||||
enabled: false
|
||||
gpuOperator:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
ingressNginx:
|
||||
enabled: true
|
||||
hosts: []
|
||||
valuesOverride: {}
|
||||
monitoringAgents:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
verticalPodAutoscaler:
|
||||
valuesOverride: {}
|
||||
controlPlane:
|
||||
apiServer:
|
||||
resources: {}
|
||||
resourcesPreset: small
|
||||
controllerManager:
|
||||
resources: {}
|
||||
resourcesPreset: micro
|
||||
konnectivity:
|
||||
server:
|
||||
resources: {}
|
||||
resourcesPreset: micro
|
||||
replicas: 2
|
||||
scheduler:
|
||||
resources: {}
|
||||
resourcesPreset: micro
|
||||
host: ""
|
||||
nodeGroups:
|
||||
md0:
|
||||
ephemeralStorage: 20Gi
|
||||
gpus: []
|
||||
instanceType: u1.medium
|
||||
maxReplicas: 10
|
||||
minReplicas: 0
|
||||
resources:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
roles:
|
||||
- ingress-nginx
|
||||
storageClass: replicated
|
||||
EOF
|
||||
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
|
||||
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
|
||||
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
|
||||
kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
|
||||
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
|
||||
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
|
||||
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
|
||||
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
|
||||
}
|
||||
|
||||
@test "Create a VM Disk" {
|
||||
name='test'
|
||||
kubectl -n tenant-test get vmdisks.apps.cozystack.io $name ||
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMDisk
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
source:
|
||||
http:
|
||||
url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
|
||||
optical: false
|
||||
storage: 5Gi
|
||||
storageClass: replicated
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
|
||||
kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
|
||||
kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
|
||||
}
|
||||
|
||||
@test "Create a VM Instance" {
|
||||
diskName='test'
|
||||
name='test'
|
||||
kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMInstance
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
external: false
|
||||
externalMethod: PortList
|
||||
externalPorts:
|
||||
- 22
|
||||
running: true
|
||||
instanceType: "u1.medium"
|
||||
instanceProfile: ubuntu
|
||||
disks:
|
||||
- name: $diskName
|
||||
gpus: []
|
||||
resources:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
sshKeys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
|
||||
cloudInit: |
|
||||
#cloud-config
|
||||
users:
|
||||
- name: test
|
||||
shell: /bin/bash
|
||||
sudo: ['ALL=(ALL) NOPASSWD: ALL']
|
||||
groups: sudo
|
||||
ssh_authorized_keys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
|
||||
cloudInitSeed: ""
|
||||
EOF
|
||||
sleep 5
|
||||
timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
|
||||
kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
|
||||
kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
|
||||
kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
|
||||
kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
|
||||
}
|
||||
|
||||
@test "Create a Virtual Machine" {
|
||||
name='test'
|
||||
kubectl -n tenant-test get virtualmachines.apps.cozystack.io $name ||
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VirtualMachine
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
external: false
|
||||
externalMethod: PortList
|
||||
externalPorts:
|
||||
- 22
|
||||
instanceType: "u1.medium"
|
||||
instanceProfile: ubuntu
|
||||
systemDisk:
|
||||
image: ubuntu
|
||||
storage: 5Gi
|
||||
storageClass: replicated
|
||||
gpus: []
|
||||
resources:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
sshKeys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
|
||||
cloudInit: |
|
||||
#cloud-config
|
||||
users:
|
||||
- name: test
|
||||
shell: /bin/bash
|
||||
sudo: ['ALL=(ALL) NOPASSWD: ALL']
|
||||
groups: sudo
|
||||
ssh_authorized_keys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
|
||||
cloudInitSeed: ""
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
|
||||
kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
|
||||
kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
|
||||
kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
|
||||
timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
|
||||
}
|
||||
|
||||
@test "Create DB PostgreSQL" {
|
||||
name='test'
|
||||
kubectl -n tenant-test get postgreses.apps.cozystack.io $name ||
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Postgres
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
external: false
|
||||
size: 10Gi
|
||||
replicas: 2
|
||||
storageClass: ""
|
||||
postgresql:
|
||||
parameters:
|
||||
max_connections: 100
|
||||
quorum:
|
||||
minSyncReplicas: 0
|
||||
maxSyncReplicas: 0
|
||||
users:
|
||||
testuser:
|
||||
password: xai7Wepo
|
||||
databases:
|
||||
testdb:
|
||||
roles:
|
||||
admin:
|
||||
- testuser
|
||||
backup:
|
||||
enabled: false
|
||||
s3Region: us-east-1
|
||||
s3Bucket: s3.example.org/postgres-backups
|
||||
schedule: "0 2 * * *"
|
||||
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
|
||||
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
|
||||
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
|
||||
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
|
||||
kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
|
||||
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
|
||||
}
|
||||
|
||||
@test "Create DB MySQL" {
|
||||
name='test'
|
||||
kubectl -n tenant-test get mysqls.apps.cozystack.io $name ||
|
||||
kubectl create -f- <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: MySQL
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
external: false
|
||||
size: 10Gi
|
||||
replicas: 2
|
||||
storageClass: ""
|
||||
users:
|
||||
testuser:
|
||||
maxUserConnections: 1000
|
||||
password: xai7Wepo
|
||||
databases:
|
||||
testdb:
|
||||
roles:
|
||||
admin:
|
||||
- testuser
|
||||
backup:
|
||||
enabled: false
|
||||
s3Region: us-east-1
|
||||
s3Bucket: s3.example.org/postgres-backups
|
||||
schedule: "0 2 * * *"
|
||||
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
|
||||
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
|
||||
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
|
||||
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
|
||||
kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
|
||||
}
|
||||
|
||||
@test "Create DB ClickHouse" {
|
||||
name='test'
|
||||
kubectl -n tenant-test get clickhouses.apps.cozystack.io $name ||
|
||||
kubectl create -f- <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: ClickHouse
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
size: 10Gi
|
||||
logStorageSize: 2Gi
|
||||
shards: 1
|
||||
replicas: 2
|
||||
storageClass: ""
|
||||
logTTL: 15
|
||||
users:
|
||||
testuser:
|
||||
password: xai7Wepo
|
||||
backup:
|
||||
enabled: false
|
||||
s3Region: us-east-1
|
||||
s3Bucket: s3.example.org/clickhouse-backups
|
||||
schedule: "0 2 * * *"
|
||||
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
|
||||
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
|
||||
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
|
||||
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
|
||||
timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
|
||||
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
|
||||
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
|
||||
}
|
||||
@@ -1,22 +0,0 @@
#!/usr/bin/env bats

@test "Create tenant with isolated mode enabled" {
  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}
@@ -1,391 +0,0 @@
|
||||
#!/usr/bin/env bats
|
||||
# -----------------------------------------------------------------------------
|
||||
# Cozystack end‑to‑end provisioning test (Bats)
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@test "Required installer assets exist" {
|
||||
if [ ! -f _out/assets/cozystack-installer.yaml ]; then
|
||||
echo "Missing: _out/assets/cozystack-installer.yaml" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
|
||||
echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "IPv4 forwarding is enabled" {
|
||||
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
|
||||
echo "IPv4 forwarding is disabled!" >&2
|
||||
echo >&2
|
||||
echo "Enable it with:" >&2
|
||||
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Clean previous VMs" {
|
||||
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
|
||||
rm -rf srv1 srv2 srv3
|
||||
}
|
||||
|
||||
@test "Prepare networking and masquerading" {
|
||||
ip link del cozy-br0 2>/dev/null || true
|
||||
ip link add cozy-br0 type bridge
|
||||
ip link set cozy-br0 up
|
||||
ip address add 192.168.123.1/24 dev cozy-br0
|
||||
|
||||
# Masquerading rule – idempotent (delete first, then add)
|
||||
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
|
||||
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
|
||||
}
|
||||
|
||||
@test "Prepare cloud‑init drive for VMs" {
|
||||
mkdir -p srv1 srv2 srv3
|
||||
|
||||
# Generate cloud‑init ISOs
|
||||
for i in 1 2 3; do
|
||||
echo "hostname: srv${i}" > "srv${i}/meta-data"
|
||||
|
||||
cat > "srv${i}/user-data" <<'EOF'
|
||||
#cloud-config
|
||||
EOF
|
||||
|
||||
cat > "srv${i}/network-config" <<EOF
|
||||
version: 2
|
||||
ethernets:
|
||||
eth0:
|
||||
dhcp4: false
|
||||
addresses:
|
||||
- "192.168.123.1${i}/26"
|
||||
gateway4: "192.168.123.1"
|
||||
nameservers:
|
||||
search: [cluster.local]
|
||||
addresses: [8.8.8.8]
|
||||
EOF
|
||||
|
||||
( cd "srv${i}" && genisoimage \
|
||||
-output seed.img \
|
||||
-volid cidata -rational-rock -joliet \
|
||||
user-data meta-data network-config )
|
||||
done
|
||||
}
|
||||
|
||||
@test "Use Talos NoCloud image from assets" {
|
||||
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
|
||||
echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -f nocloud-amd64.raw
|
||||
cp _out/assets/nocloud-amd64.raw.xz .
|
||||
xz --decompress nocloud-amd64.raw.xz
|
||||
}
|
||||
|
||||
@test "Prepare VM disks" {
|
||||
for i in 1 2 3; do
|
||||
cp nocloud-amd64.raw srv${i}/system.img
|
||||
qemu-img resize srv${i}/system.img 50G
|
||||
qemu-img create srv${i}/data.img 200G
|
||||
done
|
||||
}
|
||||
|
||||
@test "Create tap devices" {
|
||||
for i in 1 2 3; do
|
||||
ip link del cozy-srv${i} 2>/dev/null || true
|
||||
ip tuntap add dev cozy-srv${i} mode tap
|
||||
ip link set cozy-srv${i} up
|
||||
ip link set cozy-srv${i} master cozy-br0
|
||||
done
|
||||
}
|
||||
|
||||
@test "Boot QEMU VMs" {
|
||||
for i in 1 2 3; do
|
||||
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
|
||||
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
|
||||
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
|
||||
-drive file=srv${i}/system.img,if=virtio,format=raw \
|
||||
-drive file=srv${i}/seed.img,if=virtio,format=raw \
|
||||
-drive file=srv${i}/data.img,if=virtio,format=raw \
|
||||
-display none -daemonize -pidfile srv${i}/qemu.pid
|
||||
done
|
||||
|
||||
# Give qemu a few seconds to start up networking
|
||||
sleep 5
|
||||
}
|
||||
|
||||
@test "Wait until Talos API port 50000 is reachable on all machines" {
|
||||
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Generate Talos cluster configuration" {
|
||||
# Cluster‑wide patches
|
||||
cat > patch.yaml <<'EOF'
|
||||
machine:
|
||||
kubelet:
|
||||
nodeIP:
|
||||
validSubnets:
|
||||
- 192.168.123.0/24
|
||||
extraConfig:
|
||||
maxPods: 512
|
||||
kernel:
|
||||
modules:
|
||||
- name: openvswitch
|
||||
- name: drbd
|
||||
parameters:
|
||||
- usermode_helper=disabled
|
||||
- name: zfs
|
||||
- name: spl
|
||||
registries:
|
||||
mirrors:
|
||||
docker.io:
|
||||
endpoints:
|
||||
- https://mirror.gcr.io
|
||||
files:
|
||||
- content: |
|
||||
[plugins]
|
||||
[plugins."io.containerd.cri.v1.runtime"]
|
||||
device_ownership_from_security_context = true
|
||||
path: /etc/cri/conf.d/20-customization.part
|
||||
op: create
|
||||
|
||||
cluster:
|
||||
apiServer:
|
||||
extraArgs:
|
||||
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
||||
oidc-client-id: "kubernetes"
|
||||
oidc-username-claim: "preferred_username"
|
||||
oidc-groups-claim: "groups"
|
||||
network:
|
||||
cni:
|
||||
name: none
|
||||
dnsDomain: cozy.local
|
||||
podSubnets:
|
||||
- 10.244.0.0/16
|
||||
serviceSubnets:
|
||||
- 10.96.0.0/16
|
||||
EOF
|
||||
|
||||
# Control‑plane‑only patches
|
||||
cat > patch-controlplane.yaml <<'EOF'
|
||||
machine:
|
||||
nodeLabels:
|
||||
node.kubernetes.io/exclude-from-external-load-balancers:
|
||||
$patch: delete
|
||||
network:
|
||||
interfaces:
|
||||
- interface: eth0
|
||||
vip:
|
||||
ip: 192.168.123.10
|
||||
cluster:
|
||||
allowSchedulingOnControlPlanes: true
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
scheduler:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
apiServer:
|
||||
certSANs:
|
||||
- 127.0.0.1
|
||||
proxy:
|
||||
disabled: true
|
||||
discovery:
|
||||
enabled: false
|
||||
etcd:
|
||||
advertisedSubnets:
|
||||
- 192.168.123.0/24
|
||||
EOF
|
||||
|
||||
# Generate secrets once
|
||||
if [ ! -f secrets.yaml ]; then
|
||||
talosctl gen secrets
|
||||
fi
|
||||
|
||||
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
||||
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
|
||||
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
||||
}
|
||||
|
||||
@test "Apply Talos configuration to the node" {
|
||||
# Apply the configuration to all three nodes
|
||||
for node in 11 12 13; do
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
|
||||
done
|
||||
|
||||
# Wait for Talos services to come up again
|
||||
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Bootstrap Talos cluster" {
|
||||
# Bootstrap etcd on the first node
|
||||
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
||||
|
||||
# Wait until etcd is healthy
|
||||
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
|
||||
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
|
||||
|
||||
# Retrieve kubeconfig
|
||||
rm -f kubeconfig
|
||||
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
||||
|
||||
# Wait until all three nodes register in Kubernetes
|
||||
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Install Cozystack" {
|
||||
# Create namespace & configmap required by installer
|
||||
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
|
||||
kubectl create configmap cozystack -n cozy-system \
|
||||
--from-literal=bundle-name=paas-full \
|
||||
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
|
||||
--from-literal=ipv4-pod-gateway=10.244.0.1 \
|
||||
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
|
||||
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
|
||||
--from-literal=root-host=example.org \
|
||||
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
|
||||
--dry-run=client -o yaml | kubectl apply -f -
|
||||
|
||||
# Apply installer manifests from file
|
||||
kubectl apply -f _out/assets/cozystack-installer.yaml
|
||||
|
||||
# Wait for the installer deployment to become available
|
||||
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
|
||||
|
||||
# Wait until HelmReleases appear & reconcile them
|
||||
timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
|
||||
sleep 5
|
||||
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
|
||||
|
||||
# Fail the test if any HelmRelease is not Ready
|
||||
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
|
||||
kubectl get hr -A
|
||||
fail "Some HelmReleases failed to reconcile"
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Wait for Cluster‑API provider deployments" {
|
||||
# Wait for Cluster‑API provider deployments
|
||||
timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
|
||||
}
|
||||
|
||||
@test "Wait for LINSTOR and configure storage" {
|
||||
# Linstor controller and nodes
|
||||
kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
|
||||
timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
|
||||
|
||||
for node in srv1 srv2 srv3; do
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
|
||||
done
|
||||
|
||||
# Storage classes
|
||||
kubectl apply -f - <<'EOF'
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/layerList: "storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: replicated
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/autoPlace: "3"
|
||||
linstor.csi.linbit.com/layerList: "drbd storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
|
||||
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
EOF
|
||||
}
|
||||
|
||||
@test "Wait for MetalLB and configure address pool" {
|
||||
# MetalLB address pool
|
||||
kubectl apply -f - <<'EOF'
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: L2Advertisement
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
ipAddressPools: [cozystack]
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
addresses: [192.168.123.200-192.168.123.250]
|
||||
autoAssign: true
|
||||
avoidBuggyIPs: false
|
||||
EOF
|
||||
}
|
||||
|
||||
@test "Check Cozystack API service" {
|
||||
kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
|
||||
}
|
||||
|
||||
@test "Configure Tenant and wait for applications" {
|
||||
# Patch root tenant and wait for its releases
|
||||
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'
|
||||
|
||||
timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready
|
||||
|
||||
if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
|
||||
flux reconcile hr monitoring -n tenant-root --force
|
||||
kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
|
||||
fi
|
||||
|
||||
# Expose Cozystack services through ingress
|
||||
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
|
||||
|
||||
# NGINX ingress controller
|
||||
timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available
|
||||
|
||||
# etcd statefulset
|
||||
kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m
|
||||
|
||||
# VictoriaMetrics components
|
||||
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||
kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||
kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m
|
||||
|
||||
# Grafana
|
||||
kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
|
||||
kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m
|
||||
|
||||
# Verify Grafana via ingress
|
||||
ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
|
||||
echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Keycloak OIDC stack is healthy" {
|
||||
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'
|
||||
|
||||
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
|
||||
}
|
||||
@@ -20,9 +20,9 @@
  kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available

  # Wait until HelmReleases appear & reconcile them
  timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
  timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
  sleep 5
  kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
  kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex

  # Fail the test if any HelmRelease is not Ready
  if kubectl get hr -A | grep -v " True " | grep -v NAME; then
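The awk pipeline above fans out one background kubectl wait per HelmRelease and then waits for all of them. For a hypothetical listing with two releases it would generate roughly:

    kubectl wait --timeout=15m --for=condition=ready -n cozy-system hr/cozystack &
    kubectl wait --timeout=15m --for=condition=ready -n cozy-fluxcd hr/fluxcd &
    wait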
@@ -42,7 +42,11 @@
  kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
  timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'

  created_pools=$(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor sp l -s data --pastable | awk '$2 == "data" {printf " " $4} END{printf " "}')
  for node in srv1 srv2 srv3; do
    case $created_pools in
      *" $node "*) echo "Storage pool 'data' already exists on node $node"; continue;;
    esac
    kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
  done
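The case pattern makes pool creation idempotent: a node is skipped when its name already appears, surrounded by spaces, in the list collected from linstor sp l. A standalone sketch of that membership test (the node names and pool list are hypothetical):

    created_pools=" srv1 srv2 "          # e.g. pools already present on srv1 and srv2
    for node in srv1 srv2 srv3; do
      case $created_pools in
        *" $node "*) echo "skip $node (pool exists)"; continue ;;
      esac
      echo "would create storage pool on $node"   # only srv3 reaches this branch
    done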
@@ -155,3 +159,24 @@ EOF
  timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}

@test "Create tenant with isolated mode enabled" {
  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}
@@ -32,22 +32,21 @@ image-e2e-sandbox:

test: test-cluster test-apps ## Run the end-to-end tests in existing sandbox

prepare-cluster:
	docker cp ../../../_out/assets/cozystack-installer.yaml "${SANDBOX_NAME}":/workspace/_out/assets/cozystack-installer.yaml
copy-nocloud-image:
	docker cp ../../../_out/assets/nocloud-amd64.raw.xz "${SANDBOX_NAME}":/workspace/_out/assets/nocloud-amd64.raw.xz

copy-installer-manifest:
	docker cp ../../../_out/assets/cozystack-installer.yaml "${SANDBOX_NAME}":/workspace/_out/assets/cozystack-installer.yaml

prepare-cluster: copy-nocloud-image
	docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-prepare-cluster.bats'

install-cozystack:
install-cozystack: copy-installer-manifest
	docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-install-cozystack.bats'

test-cluster: ## Run the end-to-end for creating a cluster
	docker cp ../../../_out/assets/cozystack-installer.yaml "${SANDBOX_NAME}":/workspace/_out/assets/cozystack-installer.yaml
	docker cp ../../../_out/assets/nocloud-amd64.raw.xz "${SANDBOX_NAME}":/workspace/_out/assets/nocloud-amd64.raw.xz
test-cluster: copy-nocloud-image copy-installer-manifest ## Run the end-to-end for creating a cluster
	docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-cluster.bats'

test-apps: ## Run the end-to-end tests for apps
	docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-apps.bats'

test-apps-%:
	docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-apps/$*.bats'
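With the new copy-* prerequisites and the test-apps-% pattern rule, individual app suites can be run against an existing sandbox. A hedged usage example (the sandbox name is illustrative):

    # Install Cozystack into an existing sandbox, then run only the postgres suite.
    make -C packages/core/testing SANDBOX_NAME=cozy-e2e-sandbox-3f1a9c0d2e install-cozystack
    make -C packages/core/testing SANDBOX_NAME=cozy-e2e-sandbox-3f1a9c0d2e test-apps-postgres
    # test-apps-postgres expands to: hack/cozytest.sh hack/e2e-apps/postgres.bats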