Compare commits

1 commit

Author: Andrei Kvapil
SHA1: 84470a0fa7
Message: [ci] use host buildx config
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
Date: 2025-05-30 17:09:40 +02:00

635 changed files with 54733 additions and 82256 deletions


@@ -1,24 +0,0 @@
<!-- Thank you for making a contribution! Here are some tips for you:
- Start the PR title with the [label] of Cozystack component:
- For system components: [platform], [system], [linstor], [cilium], [kube-ovn], [dashboard], [cluster-api], etc.
- For managed apps: [apps], [tenant], [kubernetes], [postgres], [virtual-machine] etc.
- For development and maintenance: [tests], [ci], [docs], [maintenance].
- If it's a work in progress, consider creating this PR as a draft.
- Don't hesitate to ask for opinions and reviews in the community chats, even if it's still a draft.
- Add the label `backport` if it's a bugfix that needs to be backported to a previous version.
-->
## What this PR does
### Release note
<!-- Write a release note:
- Explain what has changed internally and for users.
- Start with the same [label] as in the PR title
- Follow the guidelines at https://github.com/kubernetes/community/blob/master/contributors/guide/release-notes.md.
-->
```release-note
[]
```


@@ -2,7 +2,7 @@ name: Pre-Commit Checks
on:
pull_request:
types: [opened, synchronize, reopened]
types: [labeled, opened, synchronize, reopened]
concurrency:
group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
@@ -28,7 +28,15 @@ jobs:
- name: Install generate
run: |
curl -sSL https://github.com/cozystack/readme-generator-for-helm/releases/download/v1.0.0/readme-generator-for-helm-linux-amd64.tar.gz | tar -xzvf- -C /usr/local/bin/ readme-generator-for-helm
sudo apt update
sudo apt install curl -y
curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
sudo apt install nodejs -y
git clone https://github.com/bitnami/readme-generator-for-helm
cd ./readme-generator-for-helm
npm install
npm install -g pkg
pkg . -o /usr/local/bin/readme-generator
- name: Run pre-commit hooks
run: |


@@ -1,17 +1,100 @@
name: "Releasing PR"
name: Releasing PR
on:
pull_request:
types: [closed]
paths-ignore:
- 'docs/**/*'
types: [labeled, opened, synchronize, reopened, closed]
# Cancel inflight runs for the same PR when a new push arrives.
concurrency:
group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
verify:
name: Test Release
runs-on: [self-hosted]
permissions:
contents: read
packages: write
if: |
contains(github.event.pull_request.labels.*.name, 'release') &&
github.event.action != 'closed'
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: Extract tag from PR branch
id: get_tag
uses: actions/github-script@v7
with:
script: |
const branch = context.payload.pull_request.head.ref;
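// e.g. a branch named "release-0.33.2" or "release-0.33.2-rc1" yields tag "v0.33.2" or "v0.33.2-rc1"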
const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
if (!m) {
core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
return;
}
const tag = `v${m[1]}`;
core.setOutput('tag', tag);
- name: Find draft release and get asset IDs
id: fetch_assets
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GH_PAT }}
script: |
const tag = '${{ steps.get_tag.outputs.tag }}';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo,
per_page: 100
});
const draft = releases.data.find(r => r.tag_name === tag && r.draft);
if (!draft) {
core.setFailed(`Draft release '${tag}' not found`);
return;
}
const findAssetId = (name) =>
draft.assets.find(a => a.name === name)?.id;
const installerId = findAssetId("cozystack-installer.yaml");
const diskId = findAssetId("nocloud-amd64.raw.xz");
if (!installerId || !diskId) {
core.setFailed("Missing required assets");
return;
}
core.setOutput("installer_id", installerId);
core.setOutput("disk_id", diskId);
- name: Download assets from GitHub API
run: |
mkdir -p _out/assets
curl -sSL \
-H "Authorization: token ${GH_PAT}" \
-H "Accept: application/octet-stream" \
-o _out/assets/cozystack-installer.yaml \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.installer_id }}"
curl -sSL \
-H "Authorization: token ${GH_PAT}" \
-H "Accept: application/octet-stream" \
-o _out/assets/nocloud-amd64.raw.xz \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.disk_id }}"
env:
GH_PAT: ${{ secrets.GH_PAT }}
- name: Run tests
run: make test
finalize:
name: Finalize Release
runs-on: [self-hosted]


@@ -2,13 +2,10 @@ name: Pull Request
on:
pull_request:
types: [opened, synchronize, reopened]
paths-ignore:
- 'docs/**/*'
types: [labeled, opened, synchronize, reopened]
# Cancel inflight runs for the same PR when a new push arrives.
concurrency:
group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
@@ -30,6 +27,9 @@ jobs:
fetch-depth: 0
fetch-tags: true
- name: Set up Docker config
run: cp -r ~/.docker ${{ runner.temp }}/.docker
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
@@ -46,17 +46,6 @@ jobs:
- name: Build Talos image
run: make -C packages/core/installer talos-nocloud
- name: Save git diff as patch
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
run: git diff HEAD > _out/assets/pr.patch
- name: Upload git diff patch
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/upload-artifact@v4
with:
name: pr-patch
path: _out/assets/pr.patch
- name: Upload installer
uses: actions/upload-artifact@v4
@@ -69,283 +58,28 @@ jobs:
with:
name: talos-image
path: _out/assets/nocloud-amd64.raw.xz
resolve_assets:
name: "Resolve assets"
runs-on: ubuntu-latest
if: contains(github.event.pull_request.labels.*.name, 'release')
outputs:
installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
steps:
- name: Checkout code
if: contains(github.event.pull_request.labels.*.name, 'release')
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Extract tag from PR branch (release PR)
if: contains(github.event.pull_request.labels.*.name, 'release')
id: get_tag
uses: actions/github-script@v7
with:
script: |
const branch = context.payload.pull_request.head.ref;
const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
if (!m) {
core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
return;
}
core.setOutput('tag', `v${m[1]}`);
- name: Find draft release & asset IDs (release PR)
if: contains(github.event.pull_request.labels.*.name, 'release')
id: fetch_assets
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GH_PAT }}
script: |
const tag = '${{ steps.get_tag.outputs.tag }}';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo,
per_page: 100
});
const draft = releases.data.find(r => r.tag_name === tag && r.draft);
if (!draft) {
core.setFailed(`Draft release '${tag}' not found`);
return;
}
const find = (n) => draft.assets.find(a => a.name === n)?.id;
const installerId = find('cozystack-installer.yaml');
const diskId = find('nocloud-amd64.raw.xz');
if (!installerId || !diskId) {
core.setFailed('Required assets missing in draft release');
return;
}
core.setOutput('installer_id', installerId);
core.setOutput('disk_id', diskId);
prepare_env:
name: "Prepare environment"
test:
name: Test
runs-on: [self-hosted]
permissions:
contents: read
packages: read
needs: ["build", "resolve_assets"]
if: ${{ always() && (needs.build.result == 'success' || needs.resolve_assets.result == 'success') }}
needs: build
# Never run when the PR carries the "release" label.
if: |
!contains(github.event.pull_request.labels.*.name, 'release')
steps:
# ▸ Checkout and prepare the codebase
- name: Checkout code
uses: actions/checkout@v4
# ▸ Regular PR path: download artefacts produced by the *build* job
- name: "Download Talos image (regular PR)"
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/download-artifact@v4
with:
name: talos-image
path: _out/assets
- name: Download PR patch
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/download-artifact@v4
with:
name: pr-patch
path: _out/assets
- name: Apply patch
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
run: |
git apply _out/assets/pr.patch
# ▸ Release PR path: fetch artefacts from the corresponding draft release
- name: Download assets from draft release (release PR)
if: contains(github.event.pull_request.labels.*.name, 'release')
run: |
mkdir -p _out/assets
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-o _out/assets/nocloud-amd64.raw.xz \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
env:
GH_PAT: ${{ secrets.GH_PAT }}
- name: Set sandbox ID
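# The sandbox name is a stable 10-character hash of repo, workflow, and ref,
# so every job in this workflow run computes the same name independently.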
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
# ▸ Start actual job steps
- name: Prepare workspace
run: |
rm -rf /tmp/$SANDBOX_NAME
cp -r ${{ github.workspace }} /tmp/$SANDBOX_NAME
- name: Prepare environment
run: |
cd /tmp/$SANDBOX_NAME
attempt=0
until make SANDBOX_NAME=$SANDBOX_NAME prepare-env; do
attempt=$((attempt + 1))
if [ $attempt -ge 3 ]; then
echo "❌ Attempt $attempt failed, exiting..."
exit 1
fi
echo "❌ Attempt $attempt failed, retrying..."
done
echo "✅ The task completed successfully after $attempt attempts"
install_cozystack:
name: "Install Cozystack"
runs-on: [self-hosted]
permissions:
contents: read
packages: read
needs: ["prepare_env", "resolve_assets"]
if: ${{ always() && needs.prepare_env.result == 'success' }}
steps:
- name: Prepare _out/assets directory
run: mkdir -p _out/assets
# ▸ Regular PR path: download artefacts produced by the *build* job
- name: "Download installer (regular PR)"
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
- name: Download installer
uses: actions/download-artifact@v4
with:
name: cozystack-installer
path: _out/assets
path: _out/assets/
# ▸ Release PR path: fetch artefacts from the corresponding draft release
- name: Download assets from draft release (release PR)
if: contains(github.event.pull_request.labels.*.name, 'release')
run: |
mkdir -p _out/assets
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-o _out/assets/cozystack-installer.yaml \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
env:
GH_PAT: ${{ secrets.GH_PAT }}
# ▸ Start actual job steps
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Sync _out/assets directory
run: |
mkdir -p /tmp/$SANDBOX_NAME/_out/assets
mv _out/assets/* /tmp/$SANDBOX_NAME/_out/assets/
- name: Install Cozystack into sandbox
run: |
cd /tmp/$SANDBOX_NAME
attempt=0
until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack; do
attempt=$((attempt + 1))
if [ $attempt -ge 3 ]; then
echo "❌ Attempt $attempt failed, exiting..."
exit 1
fi
echo "❌ Attempt $attempt failed, retrying..."
done
echo "✅ The task completed successfully after $attempt attempts."
detect_test_matrix:
name: "Detect e2e test matrix"
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set.outputs.matrix }}
steps:
- uses: actions/checkout@v4
- id: set
run: |
apps=$(find hack/e2e-apps -maxdepth 1 -mindepth 1 -name '*.bats' | \
awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
test_apps:
strategy:
matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
name: Test ${{ matrix.app }}
runs-on: [self-hosted]
needs: [install_cozystack,detect_test_matrix]
if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}
steps:
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: E2E Apps
run: |
cd /tmp/$SANDBOX_NAME
attempt=0
until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}; do
attempt=$((attempt + 1))
if [ $attempt -ge 3 ]; then
echo "❌ Attempt $attempt failed, exiting..."
exit 1
fi
echo "❌ Attempt $attempt failed, retrying..."
done
echo "✅ The task completed successfully after $attempt attempts"
collect_debug_information:
name: Collect debug information
runs-on: [self-hosted]
needs: [test_apps]
if: ${{ always() }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Collect report
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report
- name: Upload cozyreport.tgz
uses: actions/upload-artifact@v4
- name: Download Talos image
uses: actions/download-artifact@v4
with:
name: cozyreport
path: /tmp/${{ env.SANDBOX_NAME }}/_out/cozyreport.tgz
- name: Collect images list
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images
- name: Upload image list
uses: actions/upload-artifact@v4
with:
name: image-list
path: /tmp/${{ env.SANDBOX_NAME }}/_out/images.txt
cleanup:
name: Tear down environment
runs-on: [self-hosted]
needs: [collect_debug_information]
if: ${{ always() && needs.test_apps.result == 'success' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Tear down sandbox
run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete
- name: Remove workspace
run: rm -rf /tmp/$SANDBOX_NAME
name: talos-image
path: _out/assets/
- name: Test
run: make test


@@ -112,13 +112,9 @@ jobs:
# Commit built artifacts
- name: Commit release artifacts
if: steps.check_release.outputs.skip == 'false'
env:
GH_PAT: ${{ secrets.GH_PAT }}
run: |
git config user.name "cozystack-bot"
git config user.email "217169706+cozystack-bot@users.noreply.github.com"
git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
git config --unset-all http.https://github.com/.extraheader || true
git config user.name "github-actions"
git config user.email "github-actions@github.com"
git add .
git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
git push origin HEAD || true
@@ -193,12 +189,7 @@ jobs:
# Create release-X.Y.Z branch and push (force-update)
- name: Create release branch
if: steps.check_release.outputs.skip == 'false'
env:
GH_PAT: ${{ secrets.GH_PAT }}
run: |
git config user.name "cozystack-bot"
git config user.email "217169706+cozystack-bot@users.noreply.github.com"
git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
BRANCH="release-${GITHUB_REF#refs/tags/v}"
git branch -f "$BRANCH"
git push -f origin "$BRANCH"
@@ -208,7 +199,6 @@ jobs:
if: steps.check_release.outputs.skip == 'false'
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GH_PAT }}
script: |
const version = context.ref.replace('refs/tags/v', '');
const base = '${{ steps.get_base.outputs.branch }}';


@@ -11,14 +11,14 @@ repos:
- id: run-make-generate
name: Run 'make generate' in all app directories
entry: |
flock -x .git/pre-commit.lock sh -c '
for dir in ./packages/apps/*/ ./packages/extra/*/ ./packages/system/cozystack-api/; do
/bin/bash -c '
for dir in ./packages/apps/*/; do
if [ -d "$dir" ]; then
echo "Running make generate in $dir"
make generate -C "$dir" || exit $?
(cd "$dir" && make generate)
fi
done
git diff --color=always | cat
'
language: system
language: script
files: ^.*$


@@ -9,6 +9,7 @@ build-deps:
build: build-deps
make -C packages/apps/http-cache image
make -C packages/apps/postgres image
make -C packages/apps/mysql image
make -C packages/apps/clickhouse image
make -C packages/apps/kubernetes image
@@ -48,10 +49,6 @@ test:
make -C packages/core/testing apply
make -C packages/core/testing test
prepare-env:
make -C packages/core/testing apply
make -C packages/core/testing prepare-cluster
generate:
hack/update-codegen.sh


@@ -12,15 +12,11 @@
**Cozystack** is a free PaaS platform and framework for building clouds.
Cozystack is a [CNCF Sandbox Level Project](https://www.cncf.io/sandbox-projects/) that was originally built and sponsored by [Ænix](https://aenix.io/).
With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
Use Cozystack to build your own cloud or provide a cost-effective development environment.
![Cozystack user interface](https://cozystack.io/img/screenshot.png)
## Use-Cases
* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
@@ -32,6 +28,9 @@ You can use Cozystack as a platform to build a private cloud powered by Infrastr
* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
You can use Cozystack as a Kubernetes distribution for Bare Metal
## Screenshot
![Cozystack screenshot](https://cozystack.io/img/screenshot.png)
## Documentation
@@ -60,10 +59,7 @@ Commits are used to generate the changelog, and their author will be referenced
If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
## Community
You are welcome to join our [Telegram group](https://t.me/cozystack) and come to our weekly community meetings.
Add them to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics) for convenience.
You are welcome to join our weekly community meetings (just add these events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or our [Telegram group](https://t.me/cozystack).
## License


@@ -194,15 +194,7 @@ func main() {
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "TenantHelmReconciler")
os.Exit(1)
}
if err = (&controller.CozystackConfigReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
setupLog.Error(err, "unable to create controller", "controller", "Workload")
os.Exit(1)
}


@@ -1,11 +0,0 @@
## Major Features and Improvements
## Security
## Fixes
## Dependencies
## Documentation
## Development, Testing, and CI/CD


@@ -1,8 +0,0 @@
## Fixes
* [build] Update Talos Linux v1.10.3 and fix assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
* [ci] Fix uploading released artifacts to GitHub. (@kvaps in https://github.com/cozystack/cozystack/pull/1009)
* [ci] Separate build and testing jobs. (@kvaps in https://github.com/cozystack/cozystack/pull/1005)
* [docs] Write a full release post for v0.31.1. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/999)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.31.1


@@ -1,12 +0,0 @@
## Security
* Resolve a security problem that allowed a tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062, backported in https://github.com/cozystack/cozystack/pull/1066)
## Fixes
* [platform] Fix dependencies in `distro-full` bundle. (@klinch0 in https://github.com/cozystack/cozystack/pull/1056, backported in https://github.com/cozystack/cozystack/pull/1064)
* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031, backported in https://github.com/cozystack/cozystack/pull/1037)
* [platform] Reduce system resource consumption by using smaller resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054, backported in https://github.com/cozystack/cozystack/pull/1058)
* [dashboard] Fix a number of issues in the Cozystack Dashboard (@kvaps in https://github.com/cozystack/cozystack/pull/1042, backported in https://github.com/cozystack/cozystack/pull/1066)
* [apps] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040, backported in https://github.com/cozystack/cozystack/pull/1041)
* [apps] Update built-in documentation and configuration reference for managed Clickhouse application. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1059, backported in https://github.com/cozystack/cozystack/pull/1065)


@@ -1,71 +0,0 @@
Cozystack v0.32.0 is a significant release that brings new features, key fixes, and updates to underlying components.
## Major Features and Improvements
* [platform] Use `cozypkg` instead of Helm (@kvaps in https://github.com/cozystack/cozystack/pull/1057)
* [platform] Introduce the HelmRelease reconciler for system components. (@kvaps in https://github.com/cozystack/cozystack/pull/1033)
* [kubernetes] Enable using container registry mirrors by tenant Kubernetes clusters. Configure containerd for tenant Kubernetes clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/979, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/1032)
* [platform] Allow users to specify CPU requests in VCPUs. Use a library chart for resource management. (@lllamnyp in https://github.com/cozystack/cozystack/pull/972 and https://github.com/cozystack/cozystack/pull/1025)
* [platform] Annotate all child objects of apps with uniform labels for tracking by WorkloadMonitors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1018 and https://github.com/cozystack/cozystack/pull/1024)
* [platform] Introduce `cluster-domain` option and un-hardcode `cozy.local`. (@kvaps in https://github.com/cozystack/cozystack/pull/1039)
* [platform] Get instance type when reconciling WorkloadMonitor (https://github.com/cozystack/cozystack/pull/1030)
* [virtual-machine] Add RBAC rules to allow port forwarding in KubeVirt for SSH via `virtctl`. (@mattia-eleuteri in https://github.com/cozystack/cozystack/pull/1027, patched by @klinch0 in https://github.com/cozystack/cozystack/pull/1028)
* [monitoring] Add events and audit inputs (@kevin880202 in https://github.com/cozystack/cozystack/pull/948)
## Security
* Resolve a security problem that allowed tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062)
## Fixes
* [dashboard] Fix a number of issues in the Cozystack Dashboard (@kvaps in https://github.com/cozystack/cozystack/pull/1042)
* [kafka] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040)
* [cilium] Fixed Gateway API manifest. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/1016)
* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031)
* [platform] Fix dependencies for paas-hosted bundle. (@kvaps in https://github.com/cozystack/cozystack/pull/1034)
* [platform] Reduce system resource consumption by using smaller resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054)
* [virtual-machine] Fix handling of cloudinit and ssh-key input for `virtual-machine` and `vm-instance` applications. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1019 and https://github.com/cozystack/cozystack/pull/1020)
* [apps] Fix Clickhouse version parsing. (@kvaps in https://github.com/cozystack/cozystack/commit/28302e776e9d2bb8f424cf467619fa61d71ac49a)
* [apps] Add resource quotas for PostgreSQL jobs and fix application readme generation check in CI. (@klinch0 in https://github.com/cozystack/cozystack/pull/1051)
* [kube-ovn] Enable database health check. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
* [kubernetes] Fix upstream issue by updating Kubevirt-CCM. (@kvaps in https://github.com/cozystack/cozystack/pull/1052)
* [kubernetes] Fix resources and introduce a migration when upgrading tenant Kubernetes to v0.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1073)
* [cluster-api] Add a missing migration for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1072)
## Dependencies
* Introduce cozypkg, update to v1.1.0. (@kvaps in https://github.com/cozystack/cozystack/pull/1057 and https://github.com/cozystack/cozystack/pull/1063)
* Update flux-operator to 0.22.0, Flux to 2.6.x. (@kingdonb in https://github.com/cozystack/cozystack/pull/1035)
* Update Talos Linux to v1.10.3. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
* Update Cilium to v1.17.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1046)
* Update MetalLB to v0.15.2. (@kvaps in https://github.com/cozystack/cozystack/pull/1045)
* Update Kube-OVN to v1.13.13. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
## Documentation
* [Oracle Cloud Infrastructure installation guide](https://cozystack.io/docs/operations/talos/installation/oracle-cloud/). (@kvaps, @lllamnyp, and @NickVolynkin in https://github.com/cozystack/website/pull/168)
* [Cluster configuration with `talosctl`](https://cozystack.io/docs/operations/talos/configuration/talosctl/). (@NickVolynkin in https://github.com/cozystack/website/pull/211)
* [Configuring container registry mirrors for tenant Kubernetes clusters](https://cozystack.io/docs/operations/talos/configuration/air-gapped/#5-configure-container-registry-mirrors-for-tenant-kubernetes). (@klinch0 in https://github.com/cozystack/website/pull/210)
* [Explain application management strategies and available versions for managed applications.](https://cozystack.io/docs/guides/applications/). (@NickVolynkin in https://github.com/cozystack/website/pull/219)
* [How to clean up etcd state](https://cozystack.io/docs/operations/faq/#how-to-clean-up-etcd-state). (@gwynbleidd2106 in https://github.com/cozystack/website/pull/214)
* [State that Cozystack is a CNCF Sandbox project](https://github.com/cozystack/cozystack?tab=readme-ov-file#cozystack). (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1055)
## Development, Testing, and CI/CD
* [tests] Add tests for applications `virtual-machine`, `vm-disk`, `vm-instance`, `postgresql`, `mysql`, and `clickhouse`. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1048, patched by @kvaps in https://github.com/cozystack/cozystack/pull/1074)
* [tests] Fix concurrency for the `docker login` action. (@kvaps in https://github.com/cozystack/cozystack/pull/1014)
* [tests] Increase QEMU system disk size in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1011)
* [tests] Increase the waiting timeout for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1038)
* [ci] Separate build and testing jobs in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1005 and https://github.com/cozystack/cozystack/pull/1010)
* [ci] Fix the release assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006 and https://github.com/cozystack/cozystack/pull/1009)
## New Contributors
* @kevin880202 made their first contribution in https://github.com/cozystack/cozystack/pull/948
* @mattia-eleuteri made their first contribution in https://github.com/cozystack/cozystack/pull/1027
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.32.0
<!--
HEAD https://github.com/cozystack/cozystack/commit/3ce6dbe8
-->


@@ -1,38 +0,0 @@
## Major Features and Improvements
* [postgres] Introduce new functionality for backup and restore in PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/1086)
* [apps] Refactor resources in managed applications. (@kvaps in https://github.com/cozystack/cozystack/pull/1106)
* [system] Make VMAgent's `extraArgs` tunable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1091)
## Fixes
* [postgres] Escape users and database names. (@kvaps in https://github.com/cozystack/cozystack/pull/1087)
* [tenant] Fix monitoring agents HelmReleases for tenant clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/1079)
* [kubernetes] Wrap cert-manager CRDs in a conditional. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1076)
* [kubernetes] Remove `useCustomSecretForPatchContainerd` option and enable it by default. (@kvaps in https://github.com/cozystack/cozystack/pull/1104)
* [apps] Increase default resource presets for Clickhouse and Kafka from `nano` to `small`. Update OpenAPI specs and READMEs. (@kvaps in https://github.com/cozystack/cozystack/pull/1103 and https://github.com/cozystack/cozystack/pull/1105)
* [linstor] Add configurable DRBD network options for connection and timeout settings, replacing scripted logic for detecting devices that lost connection. (@kvaps in https://github.com/cozystack/cozystack/pull/1094)
## Dependencies
* Update cozy-proxy to v0.2.0 (@kvaps in https://github.com/cozystack/cozystack/pull/1081)
* Update Kafka Operator to 0.45.1-rc1 (@kvaps in https://github.com/cozystack/cozystack/pull/1082 and https://github.com/cozystack/cozystack/pull/1102)
* Update Flux Operator to 0.23.0 (@kingdonb in https://github.com/cozystack/cozystack/pull/1078)
## Documentation
* [docs] Release notes for v0.32.0 and two beta-versions. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1043)
## Development, Testing, and CI/CD
* [tests] Add tests for Kafka and Redis. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1077)
* [tests] Increase disk space for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1097)
* [tests] Update Kubernetes to v1.33. (@kvaps in https://github.com/cozystack/cozystack/pull/1083)
* [tests] Increase postgres timeouts. (@kvaps in https://github.com/cozystack/cozystack/pull/1108)
* [tests] Don't wait for the postgres read-only (`ro`) service. (@kvaps in https://github.com/cozystack/cozystack/pull/1109)
* [ci] Setup systemd timer to tear down sandbox. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1092)
* [ci] Split testing job into several. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1075)
* [ci] Run E2E tests as separate parallel jobs. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1093)
* [ci] Refactor GitHub workflows. (@kvaps in https://github.com/cozystack/cozystack/pull/1107)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.32.0...v0.32.1


@@ -1,91 +0,0 @@
> [!WARNING]
> A patch release [0.33.2](https://github.com/cozystack/cozystack/releases/tag/v0.33.2) fixing a regression in 0.33.0 has been released.
> It is recommended to skip this version and upgrade to [0.33.2](https://github.com/cozystack/cozystack/releases/tag/v0.33.2) instead.
## Feature Highlights
### Unified CPU and Memory Allocation Management
Version 0.31.0 introduced a single-source-of-truth configuration variable, `cpu-allocation-ratio`,
making CPU resource requests and limits uniform in virtual machines managed by KubeVirt.
The new release 0.33.0 introduces `memory-allocation-ratio` and expands both variables to all managed applications and tenant resource quotas.
Resource presets also respect the allocation ratios and behave in the same way as explicit resource definitions.
The new resource definition format is concise and simple for platform users.
```yaml
# resource definition in the configuration
resources:
cpu: <defined cpu value>
memory: <defined memory value>
```
It results in Kubernetes resource requests and limits, based on defined values and the universal allocation ratios:
```yaml
# actual requests and limits, provided to the application
resources:
limits:
cpu: <defined cpu value>
memory: <defined memory value>
requests:
cpu: <defined cpu value / cpu-allocation-ratio>
memory: <defined memory value / memory-allocation-ratio>
```
When updating from earlier Cozystack versions, resource configuration in managed applications will be automatically migrated to the new format.
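As a worked example (the ratio values here are illustrative, not release defaults): with `cpu-allocation-ratio: 10` and `memory-allocation-ratio: 2`, a definition of `cpu: 1` and `memory: 4Gi` expands to:
```yaml
# illustrative ratios: cpu-allocation-ratio=10, memory-allocation-ratio=2
resources:
  limits:
    cpu: 1        # defined value
    memory: 4Gi   # defined value
  requests:
    cpu: 100m     # 1 / 10
    memory: 2Gi   # 4Gi / 2
```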
### Backing up and Restoring Data in Tenant Kubernetes
One of the main features of the release is backup capability for PVCs in tenant Kubernetes clusters.
It enables platform and tenant administrators to back up and restore data used by services in the tenant clusters.
This new functionality in Cozystack is powered by [Velero](https://velero.io/) and needs an external S3-compatible storage.
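For context, here is a minimal sketch of a plain upstream Velero `Backup` object of the kind this feature builds on; the names and namespace selection are placeholders, and Cozystack's actual integration may differ:
```yaml
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: tenant-data-backup   # hypothetical name
  namespace: velero
spec:
  includedNamespaces:
    - my-app                 # namespaces whose PVC data should be captured
  storageLocation: default   # must point at an S3-compatible backend
  ttl: 720h                  # keep the backup for 30 days
```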
### Support for NFS Storage
Cozystack now supports NFS shared storage via a new optional system module.
See the documentation: https://cozystack.io/docs/operations/storage/nfs/.
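As a hedged illustration (assuming the module wraps the upstream NFS CSI driver, `nfs.csi.k8s.io`; server and share values are placeholders), a typical StorageClass for NFS-backed volumes looks like:
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs
provisioner: nfs.csi.k8s.io
parameters:
  server: nfs.example.org      # address of the NFS server
  share: /exports/cozystack    # exported path to provision volumes under
reclaimPolicy: Delete
```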
## Features and Improvements
* [kubernetes] Enable PVC backups in tenant Kubernetes clusters, powered by [Velero](https://velero.io/). (@klinch0 in https://github.com/cozystack/cozystack/pull/1132)
* [nfs-driver] Enable NFS support by introducing a new optional system module `nfs-driver`. (@kvaps in https://github.com/cozystack/cozystack/pull/1133)
* [virtual-machine] Configure CPU sockets available to VMs with the `resources.cpu.sockets` configuration value. (@klinch0 in https://github.com/cozystack/cozystack/pull/1131)
* [virtual-machine] Add support for using pre-imported "golden image" disks for virtual machines, enabling faster provisioning by referencing existing images instead of downloading via HTTP. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1112)
* [kubernetes] Add an option to expose the Ingress-NGINX controller in tenant Kubernetes cluster via LoadBalancer. New configuration value `exposeMethod` offers a choice of `Proxied` and `LoadBalancer`. (@kvaps in https://github.com/cozystack/cozystack/pull/1114)
* [apps] When updating from earlier Cozystack versions, automatically migrate to the new resource definition format: from `resources.requests.[cpu,memory]` and `resources.limits.[cpu,memory]` to `resources.[cpu,memory]`. (@kvaps in https://github.com/cozystack/cozystack/pull/1127)
* [apps] Give examples of new resource definitions in the managed app READMEs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1120)
* [tenant] Respect `cpu-allocation-ratio` in tenant's `resourceQuotas`. (@kvaps in https://github.com/cozystack/cozystack/pull/1119)
* [cozy-lib] Introduce helper function to calculate Java heap params based on memory requests and limits. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1157)
## Security
* [monitoring] Disable sign up in Alerta. (@klinch0 in https://github.com/cozystack/cozystack/pull/1129)
## Fixes
* [platform] Always set resources for managed apps. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1156)
* [platform] Remove the memory limit for Keycloak deployment. (@klinch0 in https://github.com/cozystack/cozystack/pull/1122)
* [kubernetes] Fix a condition in the ingress template for tenant Kubernetes. (@kvaps in https://github.com/cozystack/cozystack/pull/1143)
* [kubernetes] Fix a deadlock on reattaching a KubeVirt-CSI volume. (@kvaps in https://github.com/cozystack/cozystack/pull/1135)
* [mysql] MySQL applications with a single replica now correctly create a `LoadBalancer` service. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1113)
* [etcd] Fix resources and headless services in the etcd application. (@kvaps in https://github.com/cozystack/cozystack/pull/1128)
* [apps] Enable selecting `resourcePreset` from a drop-down list for all applications by adding an enum of allowed values to the config schema. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1117)
* [apps] Refactor resource presets provided to managed apps by `cozy-lib`. (@kvaps in https://github.com/cozystack/cozystack/pull/1155)
* [keycloak] Calculate and pass Java heap parameters explicitly to prevent OOM errors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1157)
## Development, Testing, and CI/CD
* [dx] Introduce cozyreport tool and gather reports in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1139)
* [ci] Use Nexus as a pull-through cache for CI. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1124)
* [ci] Save a list of observed images after each workflow run. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1089)
* [ci] Skip Cozystack tests on PRs that only change the docs. Don't restart CI when a PR is labeled. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1136)
* [dx] Fix Makefile variables for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1115)
* [tests] Introduce self-destructing testing environments. (@kvaps in https://github.com/cozystack/cozystack/pull/1138, https://github.com/cozystack/cozystack/pull/1140, https://github.com/cozystack/cozystack/pull/1141, https://github.com/cozystack/cozystack/pull/1142)
* [e2e] Retry flaky application tests to improve total test time. (@kvaps in https://github.com/cozystack/cozystack/pull/1123)
* [maintenance] Add a PR template. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1121)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.32.1...v0.33.0


@@ -1,3 +0,0 @@
## Fixes
* [kubevirt-csi] Fix a regression by updating the role of the CSI controller. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1165)


@@ -1,19 +0,0 @@
## Features and Improvements
* [vm-instance] Enable running [Windows](https://cozystack.io/docs/operations/virtualization/windows/) and [MikroTik RouterOS](https://cozystack.io/docs/operations/virtualization/mikrotik/) in Cozystack. Add `bus` option and always specify `bootOrder` for all disks. (@kvaps in https://github.com/cozystack/cozystack/pull/1168)
* [cozystack-api] Refactor OpenAPI Schema and support reading it from config. (@kvaps in https://github.com/cozystack/cozystack/pull/1173)
* [cozystack-api] Enable using singular resource names in Cozystack API. For example, `kubectl get tenant` is now a valid command, in addition to `kubectl get tenants`. (@kvaps in https://github.com/cozystack/cozystack/pull/1169)
* [postgres] Explain how to back up and restore PostgreSQL using Velero backups. (@klinch0 and @NickVolynkin in https://github.com/cozystack/cozystack/pull/1141)
## Fixes
* [virtual-machine,vm-instance] Adjusted RBAC role to let users read the service associated with the VMs they create. Consequently, users can now see details of the service in the dashboard and therefore read the IP address of the VM. (@klinch0 in https://github.com/cozystack/cozystack/pull/1161)
* [cozystack-api] Fix an error with `resourceVersion` which resulted in message 'failed to update HelmRelease: helmreleases.helm.toolkit.fluxcd.io "xxx" is invalid...'. (@kvaps in https://github.com/cozystack/cozystack/pull/1170)
* [cozystack-api] Fix an error in updating lists in Cozystack objects, which resulted in message "Warning: resource ... is missing the kubectl.kubernetes.io/last-applied-configuration annotation". (@kvaps in https://github.com/cozystack/cozystack/pull/1171)
* [cozystack-api] Disable `strategic-json-patch` support. (@kvaps in https://github.com/cozystack/cozystack/pull/1179)
* [dashboard] Fix the code for removing dashboard comments which used to mistakenly remove shebang from cloudInit scripts. (@kvaps in https://github.com/cozystack/cozystack/pull/1175).
* [virtual-machine] Fix cloudInit and sshKeys processing. (@kvaps in https://github.com/cozystack/cozystack/pull/1175 and https://github.com/cozystack/cozystack/commit/da3ee5d0ea9e87529c8adc4fcccffabe8782292e)
* [applications] Fix a typo in preset resource tables in the built-in documentation of managed applications. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1172)
* [kubernetes] Enable deleting Velero component from a tenant Kubernetes cluster. (@klinch0 in https://github.com/cozystack/cozystack/pull/1176)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.33.1...v0.33.2

go.mod

@@ -37,7 +37,6 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
@@ -92,14 +91,14 @@ require (
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect

go.sum

@@ -26,8 +26,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -212,8 +212,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -222,26 +222,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=


@@ -1,32 +0,0 @@
#!/bin/bash
set -e
name="$1"
url="$2"
if [ -z "$name" ] || [ -z "$url" ]; then
echo "Usage: <name> <url>"
echo "Example: 'ubuntu' 'https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img'"
exit 1
fi
#### create DV ubuntu source for CDI image cloning
kubectl create -f - <<EOF
apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
name: "vm-image-$name"
namespace: cozy-public
annotations:
cdi.kubevirt.io/storage.bind.immediate.requested: "true"
spec:
source:
http:
url: "$url"
storage:
resources:
requests:
storage: 5Gi
storageClassName: replicated
EOF


@@ -1,8 +0,0 @@
#!/bin/sh
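# Collect container image listings from each node, then normalize to "<sha> <name>" pairs and deduplicate.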
for node in 11 12 13; do
talosctl -n 192.168.123.${node} -e 192.168.123.${node} images ls >> images.tmp
talosctl -n 192.168.123.${node} -e 192.168.123.${node} images --namespace system ls >> images.tmp
done
while read _ name sha _ ; do echo $sha $name ; done < images.tmp | sort -u > images.txt


@@ -1,147 +0,0 @@
#!/bin/sh
REPORT_DATE=$(date +%Y-%m-%d_%H-%M-%S)
REPORT_NAME=${1:-cozyreport-$REPORT_DATE}
REPORT_PDIR=$(mktemp -d)
REPORT_DIR=$REPORT_PDIR/$REPORT_NAME
# -- check dependencies
command -V kubectl >/dev/null || exit $?
command -V tar >/dev/null || exit $?
# -- cozystack module
echo "Collecting Cozystack information..."
mkdir -p $REPORT_DIR/cozystack
kubectl get deploy -n cozy-system cozystack -o jsonpath='{.spec.template.spec.containers[0].image}' > $REPORT_DIR/cozystack/image.txt 2>&1
kubectl get cm -n cozy-system --no-headers | awk '$1 ~ /^cozystack/' |
while read NAME _; do
DIR=$REPORT_DIR/cozystack/configs
mkdir -p $DIR
kubectl get cm -n cozy-system $NAME -o yaml > $DIR/$NAME.yaml 2>&1
done
# -- kubernetes module
echo "Collecting Kubernetes information..."
mkdir -p $REPORT_DIR/kubernetes
kubectl version > $REPORT_DIR/kubernetes/version.txt 2>&1
echo "Collecting nodes..."
kubectl get nodes -o wide > $REPORT_DIR/kubernetes/nodes.txt 2>&1
kubectl get nodes --no-headers | awk '$2 != "Ready"' |
while read NAME _; do
DIR=$REPORT_DIR/kubernetes/nodes/$NAME
mkdir -p $DIR
kubectl get node $NAME -o yaml > $DIR/node.yaml 2>&1
kubectl describe node $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting namespaces..."
kubectl get ns -o wide > $REPORT_DIR/kubernetes/namespaces.txt 2>&1
kubectl get ns --no-headers | awk '$2 != "Active"' |
while read NAME _; do
DIR=$REPORT_DIR/kubernetes/namespaces/$NAME
mkdir -p $DIR
kubectl get ns $NAME -o yaml > $DIR/namespace.yaml 2>&1
kubectl describe ns $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting helmreleases..."
kubectl get hr -A > $REPORT_DIR/kubernetes/helmreleases.txt 2>&1
kubectl get hr -A --no-headers | awk '$4 != "True"' | \
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/helmreleases/$NAMESPACE/$NAME
mkdir -p $DIR
kubectl get hr -n $NAMESPACE $NAME -o yaml > $DIR/hr.yaml 2>&1
kubectl describe hr -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting pods..."
kubectl get pod -A -o wide > $REPORT_DIR/kubernetes/pods.txt 2>&1
kubectl get pod -A --no-headers | awk '$4 !~ /Running|Succeeded|Completed/' |
while read NAMESPACE NAME _ STATE _; do
DIR=$REPORT_DIR/kubernetes/pods/$NAMESPACE/$NAME
mkdir -p $DIR
CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name}' -n $NAMESPACE $NAME)
kubectl get pod -n $NAMESPACE $NAME -o yaml > $DIR/pod.yaml 2>&1
kubectl describe pod -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
if [ "$STATE" != "Pending" ]; then
for CONTAINER in $CONTAINERS; do
kubectl logs -n $NAMESPACE $NAME $CONTAINER > $DIR/logs-$CONTAINER.txt 2>&1
kubectl logs -n $NAMESPACE $NAME $CONTAINER --previous > $DIR/logs-$CONTAINER-previous.txt 2>&1
done
fi
done
echo "Collecting virtualmachines..."
kubectl get vm -A > $REPORT_DIR/kubernetes/vms.txt 2>&1
kubectl get vm -A --no-headers | awk '$5 != "True"' |
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/vm/$NAMESPACE/$NAME
mkdir -p $DIR
kubectl get vm -n $NAMESPACE $NAME -o yaml > $DIR/vm.yaml 2>&1
kubectl describe vm -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting virtualmachine instances..."
kubectl get vmi -A > $REPORT_DIR/kubernetes/vmis.txt 2>&1
kubectl get vmi -A --no-headers | awk '$4 != "Running"' |
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/vmi/$NAMESPACE/$NAME
mkdir -p $DIR
kubectl get vmi -n $NAMESPACE $NAME -o yaml > $DIR/vmi.yaml 2>&1
kubectl describe vmi -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting services..."
kubectl get svc -A > $REPORT_DIR/kubernetes/services.txt 2>&1
kubectl get svc -A --no-headers | awk '$4 == "<pending>"' |
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/services/$NAMESPACE/$NAME
mkdir -p $DIR
kubectl get svc -n $NAMESPACE $NAME -o yaml > $DIR/service.yaml 2>&1
kubectl describe svc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting pvcs..."
kubectl get pvc -A > $REPORT_DIR/kubernetes/pvcs.txt 2>&1
kubectl get pvc -A --no-headers | awk '$3 != "Bound"' |
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/pvc/$NAMESPACE/$NAME
mkdir -p $DIR
kubectl get pvc -n $NAMESPACE $NAME -o yaml > $DIR/pvc.yaml 2>&1
kubectl describe pvc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
# -- kamaji module
if kubectl get deploy -n cozy-linstor linstor-controller >/dev/null 2>&1; then
echo "Collecting kamaji resources..."
DIR=$REPORT_DIR/kamaji
mkdir -p $DIR
kubectl logs -n cozy-kamaji deployment/kamaji > $DIR/kamaji-controller.log 2>&1
kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A > $DIR/kamajicontrolplanes.txt 2>&1
kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A -o yaml > $DIR/kamajicontrolplanes.yaml 2>&1
kubectl get tenantcontrolplanes.kamaji.clastix.io -A > $DIR/tenantcontrolplanes.txt 2>&1
kubectl get tenantcontrolplanes.kamaji.clastix.io -A -o yaml > $DIR/tenantcontrolplanes.yaml 2>&1
fi
# -- linstor module
if kubectl get deploy -n cozy-linstor linstor-controller >/dev/null 2>&1; then
echo "Collecting linstor resources..."
DIR=$REPORT_DIR/linstor
mkdir -p $DIR
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color n l > $DIR/nodes.txt 2>&1
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color sp l > $DIR/storage-pools.txt 2>&1
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color r l > $DIR/resources.txt 2>&1
fi
# -- finalization
echo "Creating archive..."
tar -czf $REPORT_NAME.tgz -C $REPORT_PDIR .
echo "Report created: $REPORT_NAME.tgz"
echo "Cleaning up..."
rm -rf $REPORT_PDIR

hack/e2e-apps.bats (new executable file)

@@ -0,0 +1,94 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------
@test "Create tenant with isolated mode enabled" {
kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
name: test
namespace: tenant-root
spec:
etcd: false
host: ""
ingress: false
isolated: true
monitoring: false
resourceQuotas: {}
seaweedfs: false
EOF
kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}
@test "Create a tenant Kubernetes control plane" {
kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
name: test
namespace: tenant-test
spec:
addons:
certManager:
enabled: false
valuesOverride: {}
cilium:
valuesOverride: {}
fluxcd:
enabled: false
valuesOverride: {}
gatewayAPI:
enabled: false
gpuOperator:
enabled: false
valuesOverride: {}
ingressNginx:
enabled: true
hosts: []
valuesOverride: {}
monitoringAgents:
enabled: false
valuesOverride: {}
verticalPodAutoscaler:
valuesOverride: {}
controlPlane:
apiServer:
resources: {}
resourcesPreset: small
controllerManager:
resources: {}
resourcesPreset: micro
konnectivity:
server:
resources: {}
resourcesPreset: micro
replicas: 2
scheduler:
resources: {}
resourcesPreset: micro
host: ""
nodeGroups:
md0:
ephemeralStorage: 20Gi
gpus: []
instanceType: u1.medium
maxReplicas: 10
minReplicas: 0
resources:
cpu: ""
memory: ""
roles:
- ingress-nginx
storageClass: replicated
EOF
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=5m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
}

View File

@@ -1,41 +0,0 @@
#!/usr/bin/env bats
@test "Create DB ClickHouse" {
name='test'
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
name: $name
namespace: tenant-test
spec:
size: 10Gi
logStorageSize: 2Gi
shards: 1
replicas: 2
storageClass: ""
logTTL: 15
users:
testuser:
password: xai7Wepo
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/clickhouse-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
}
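As a manual follow-up (not part of the suite), the ClickHouse HTTP interface on port 8123 answers `Ok.` on `/ping`; a throwaway curl pod against the `chendpoint` service created above would look roughly like this (pod name and image are illustrative):

```bash
# Hypothetical smoke test against the chendpoint service (port 8123 from the test)
kubectl -n tenant-test run ch-ping --rm -i --restart=Never \
  --image=curlimages/curl -- curl -s http://chendpoint-clickhouse-test:8123/ping
```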

View File

@@ -1,51 +0,0 @@
#!/usr/bin/env bats
@test "Create Kafka" {
name='test'
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kafka
metadata:
name: $name
namespace: tenant-test
spec:
external: false
kafka:
size: 10Gi
replicas: 2
storageClass: ""
resources: {}
resourcesPreset: "nano"
zookeeper:
size: 5Gi
replicas: 2
storageClass: ""
resources:
resourcesPreset: "nano"
topics:
- name: testResults
partitions: 1
replicas: 2
config:
min.insync.replicas: 2
- name: testOrders
config:
cleanup.policy: compact
segment.ms: 3600000
max.compaction.lag.ms: 5400000
min.insync.replicas: 2
partitions: 1
replicas: 2
EOF
sleep 5
kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete kafka.apps.cozystack.io $name
kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
}
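Assuming the chart is backed by Strimzi (which the `kafkas` wait above suggests), the topics declared in `spec.topics` should surface as `KafkaTopic` resources; a rough manual check, not part of the suite:

```bash
# List the KafkaTopic resources created from spec.topics and wait for readiness
kubectl -n tenant-test get kafkatopics.kafka.strimzi.io
kubectl -n tenant-test wait kafkatopics.kafka.strimzi.io --all --timeout=60s --for=condition=ready
```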

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env bats
run_kubernetes_test() {
local version_expr="$1"
local test_name="$2"
local port="$3"
local k8s_version=$(yq "$version_expr" packages/apps/kubernetes/files/versions.yaml)
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
name: "${test_name}"
namespace: tenant-test
spec:
addons:
certManager:
enabled: false
valuesOverride: {}
cilium:
valuesOverride: {}
fluxcd:
enabled: false
valuesOverride: {}
gatewayAPI:
enabled: false
gpuOperator:
enabled: false
valuesOverride: {}
ingressNginx:
enabled: true
hosts: []
valuesOverride: {}
monitoringAgents:
enabled: false
valuesOverride: {}
verticalPodAutoscaler:
valuesOverride: {}
controlPlane:
apiServer:
resources: {}
resourcesPreset: small
controllerManager:
resources: {}
resourcesPreset: micro
konnectivity:
server:
resources: {}
resourcesPreset: micro
replicas: 2
scheduler:
resources: {}
resourcesPreset: micro
host: ""
nodeGroups:
md0:
ephemeralStorage: 20Gi
gpus: []
instanceType: u1.medium
maxReplicas: 10
minReplicas: 0
resources:
cpu: ""
memory: ""
roles:
- ingress-nginx
storageClass: replicated
version: "${k8s_version}"
EOF
# Wait for the tenant-test namespace to be active
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
# Wait for the Kamaji control plane to be created (retry for up to 10 seconds)
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-'"${test_name}"'; do sleep 1; done'
# Wait for the tenant control plane to be fully created (timeout after 4 minutes)
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-${test_name} --timeout=4m
# Wait for Kubernetes resources to be ready (timeout after 2 minutes)
kubectl wait tcp -n tenant-test kubernetes-${test_name} --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
# Wait for all required deployments to be available (timeout after 4 minutes)
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-${test_name} kubernetes-${test_name}-cluster-autoscaler kubernetes-${test_name}-kccm kubernetes-${test_name}-kcsi-controller
# Wait for the machine deployment to scale to 2 replicas (timeout after 1 minute)
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
# Get the admin kubeconfig and save it to a file
kubectl get secret kubernetes-${test_name}-admin-kubeconfig -ojsonpath='{.data.super-admin\.conf}' -n tenant-test | base64 -d > tenantkubeconfig
# Update the kubeconfig to use localhost for the API server
yq -i ".clusters[0].cluster.server = \"https://localhost:${port}\"" tenantkubeconfig
# Set up port forwarding to the Kubernetes API server for a 40 second timeout
bash -c 'timeout 40s kubectl port-forward service/kubernetes-'"${test_name}"' -n tenant-test '"${port}"':6443 > /dev/null 2>&1 &'
# Verify the Kubernetes version matches what we expect (retry for up to 20 seconds)
timeout 20 sh -ec 'until kubectl --kubeconfig tenantkubeconfig version 2>/dev/null | grep -Fq "Server Version: '"${k8s_version}"'"; do sleep 5; done'
# Wait for all machine deployment replicas to be ready (timeout after 10 minutes)
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
# Clean up by deleting the Kubernetes resource
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $test_name
}
@test "Create a tenant Kubernetes control plane with latest version" {
run_kubernetes_test 'keys | sort_by(.) | .[-1]' 'test-latest-version' '59991'
}
@test "Create a tenant Kubernetes control plane with previous version" {
run_kubernetes_test 'keys | sort_by(.) | .[-2]' 'test-previous-version' '59992'
}
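For reference, a sketch of what the two `version_expr` arguments select, using a made-up `versions.yaml` (the real file lives at `packages/apps/kubernetes/files/versions.yaml`):

```bash
cat > /tmp/versions.yaml <<'EOF'
v1.29.0: {}
v1.30.0: {}
v1.31.0: {}
EOF
yq 'keys | sort_by(.) | .[-1]' /tmp/versions.yaml   # latest   -> v1.31.0
yq 'keys | sort_by(.) | .[-2]' /tmp/versions.yaml   # previous -> v1.30.0
```

Note that `sort_by(.)` orders keys lexically, so this assumes version keys that also sort correctly as strings.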

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bats
@test "Create DB MySQL" {
name='test'
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
name: $name
namespace: tenant-test
spec:
external: false
size: 10Gi
replicas: 2
storageClass: ""
users:
testuser:
maxUserConnections: 1000
password: xai7Wepo
databases:
testdb:
roles:
admin:
- testuser
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}

View File

@@ -1,54 +0,0 @@
#!/usr/bin/env bats
@test "Create DB PostgreSQL" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
name: $name
namespace: tenant-test
spec:
external: false
size: 10Gi
replicas: 2
storageClass: ""
postgresql:
parameters:
max_connections: 100
quorum:
minSyncReplicas: 0
maxSyncReplicas: 0
users:
testuser:
password: xai7Wepo
databases:
testdb:
roles:
admin:
- testuser
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
# for some reason it takes longer for the read-only endpoint to be ready
#timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}
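A manual connectivity check (not part of the suite) against the read-write service, using the user and database declared in the manifest above; the pod name and client image are illustrative:

```bash
# Hypothetical psql smoke test via a throwaway client pod
kubectl -n tenant-test run pg-ping --rm -i --restart=Never \
  --image=postgres:16 --env=PGPASSWORD=xai7Wepo -- \
  psql -h postgres-test-rw -U testuser -d testdb -c 'SELECT 1;'
```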

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env bats
@test "Create Redis" {
name='test'
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
metadata:
name: $name
namespace: tenant-test
spec:
external: false
size: 1Gi
replicas: 2
storageClass: ""
authEnabled: true
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
kubectl -n tenant-test delete redis.apps.cozystack.io $name
}

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env bats
@test "Create a Virtual Machine" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
name: $name
namespace: tenant-test
spec:
external: false
externalMethod: PortList
externalPorts:
- 22
instanceType: "u1.medium"
instanceProfile: ubuntu
systemDisk:
image: ubuntu
storage: 5Gi
storageClass: replicated
gpus: []
resources:
cpu: ""
memory: ""
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInit: |
#cloud-config
users:
- name: test
shell: /bin/bash
sudo: ['ALL=(ALL) NOPASSWD: ALL']
groups: sudo
ssh_authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInitSeed: ""
EOF
sleep 5
kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}
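For interactive debugging (assuming `virtctl` is installed locally and the kubeconfig points at the management cluster), one way to reach the VM is to forward its SSH port and log in as the cloud-init user defined above:

```bash
# Hypothetical manual check, not part of the suite
virtctl port-forward -n tenant-test vmi/virtual-machine-test 2222:22 &
ssh -p 2222 test@localhost
```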

View File

@@ -1,68 +0,0 @@
#!/usr/bin/env bats
@test "Create a VM Disk" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMDisk
metadata:
name: $name
namespace: tenant-test
spec:
source:
http:
url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
optical: false
storage: 5Gi
storageClass: replicated
EOF
sleep 5
kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}
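While the image import runs, CDI reports progress on the DataVolume; a quick way to watch it (illustrative, not part of the test):

```bash
# Import progress as reported by CDI
kubectl -n tenant-test get dv vm-disk-test -o jsonpath='{.status.progress}'
```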
@test "Create a VM Instance" {
diskName='test'
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
name: $name
namespace: tenant-test
spec:
external: false
externalMethod: PortList
externalPorts:
- 22
running: true
instanceType: "u1.medium"
instanceProfile: ubuntu
disks:
- name: $diskName
gpus: []
resources:
cpu: ""
memory: ""
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInit: |
#cloud-config
users:
- name: test
shell: /bin/bash
sudo: ['ALL=(ALL) NOPASSWD: ALL']
groups: sudo
ssh_authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInitSeed: ""
EOF
sleep 5
timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}

hack/e2e-install-cozystack.bats → hack/e2e-cluster.bats Normal file → Executable file
View File

@@ -1,10 +1,237 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------
@test "Required installer assets exist" {
if [ ! -f _out/assets/cozystack-installer.yaml ]; then
echo "Missing: _out/assets/cozystack-installer.yaml" >&2
exit 1
fi
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
exit 1
fi
}
@test "IPv4 forwarding is enabled" {
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is disabled!" >&2
echo >&2
echo "Enable it with:" >&2
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
exit 1
fi
}
@test "Clean previous VMs" {
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
rm -rf srv1 srv2 srv3
}
@test "Prepare networking and masquerading" {
ip link del cozy-br0 2>/dev/null || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip address add 192.168.123.1/24 dev cozy-br0
# Make the masquerading rule idempotent (delete first, then add)
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}
@test "Prepare cloudinit drive for VMs" {
mkdir -p srv1 srv2 srv3
# Generate cloudinit ISOs
for i in 1 2 3; do
echo "hostname: srv${i}" > "srv${i}/meta-data"
cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF
cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
eth0:
dhcp4: false
addresses:
- "192.168.123.1${i}/26"
gateway4: "192.168.123.1"
nameservers:
search: [cluster.local]
addresses: [8.8.8.8]
EOF
( cd "srv${i}" && genisoimage \
-output seed.img \
-volid cidata -rational-rock -joliet \
user-data meta-data network-config )
done
}
@test "Use Talos NoCloud image from assets" {
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
exit 1
fi
rm -f nocloud-amd64.raw
cp _out/assets/nocloud-amd64.raw.xz .
xz --decompress nocloud-amd64.raw.xz
}
@test "Prepare VM disks" {
for i in 1 2 3; do
cp nocloud-amd64.raw srv${i}/system.img
qemu-img resize srv${i}/system.img 50G
qemu-img create srv${i}/data.img 100G
done
}
@test "Create tap devices" {
for i in 1 2 3; do
ip link del cozy-srv${i} 2>/dev/null || true
ip tuntap add dev cozy-srv${i} mode tap
ip link set cozy-srv${i} up
ip link set cozy-srv${i} master cozy-br0
done
}
@test "Boot QEMU VMs" {
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
-drive file=srv${i}/system.img,if=virtio,format=raw \
-drive file=srv${i}/seed.img,if=virtio,format=raw \
-drive file=srv${i}/data.img,if=virtio,format=raw \
-display none -daemonize -pidfile srv${i}/qemu.pid
done
# Give qemu a few seconds to start up networking
sleep 5
}
@test "Wait until Talos API port 50000 is reachable on all machines" {
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Generate Talos cluster configuration" {
# Cluster-wide patches
cat > patch.yaml <<'EOF'
machine:
kubelet:
nodeIP:
validSubnets:
- 192.168.123.0/24
extraConfig:
maxPods: 512
kernel:
modules:
- name: openvswitch
- name: drbd
parameters:
- usermode_helper=disabled
- name: zfs
- name: spl
registries:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
files:
- content: |
[plugins]
[plugins."io.containerd.cri.v1.runtime"]
device_ownership_from_security_context = true
path: /etc/cri/conf.d/20-customization.part
op: create
cluster:
apiServer:
extraArgs:
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
oidc-client-id: "kubernetes"
oidc-username-claim: "preferred_username"
oidc-groups-claim: "groups"
network:
cni:
name: none
dnsDomain: cozy.local
podSubnets:
- 10.244.0.0/16
serviceSubnets:
- 10.96.0.0/16
EOF
# Control-plane-only patches
cat > patch-controlplane.yaml <<'EOF'
machine:
nodeLabels:
node.kubernetes.io/exclude-from-external-load-balancers:
$patch: delete
network:
interfaces:
- interface: eth0
vip:
ip: 192.168.123.10
cluster:
allowSchedulingOnControlPlanes: true
controllerManager:
extraArgs:
bind-address: 0.0.0.0
scheduler:
extraArgs:
bind-address: 0.0.0.0
apiServer:
certSANs:
- 127.0.0.1
proxy:
disabled: true
discovery:
enabled: false
etcd:
advertisedSubnets:
- 192.168.123.0/24
EOF
# Generate secrets once
if [ ! -f secrets.yaml ]; then
talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}
@test "Apply Talos configuration to the node" {
# Apply the configuration to all three nodes
for node in 11 12 13; do
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
done
# Wait for Talos services to come up again
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Bootstrap Talos cluster" {
# Bootstrap etcd on the first node
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait until etcd is healthy
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
# Retrieve kubeconfig
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
# Wait until all three nodes register in Kubernetes
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}
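An optional sanity check at this point (not part of the suite) is Talos's built-in health suite, run with the generated `talosconfig`; roughly:

```bash
# Hypothetical cluster health check against the bootstrapped nodes
talosctl --talosconfig talosconfig health -n 192.168.123.11 -e 192.168.123.10 \
  --control-plane-nodes 192.168.123.11,192.168.123.12,192.168.123.13
```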
@test "Install Cozystack" {
@@ -27,14 +254,14 @@
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
# Wait until HelmReleases appear & reconcile them
timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
sleep 5
kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
# Fail the test if any HelmRelease is not Ready
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
kubectl get hr -A
echo "Some HelmReleases failed to reconcile" >&2
fail "Some HelmReleases failed to reconcile"
fi
}
@@ -49,11 +276,7 @@
kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
created_pools=$(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor sp l -s data --pastable | awk '$2 == "data" {printf " " $4} END{printf " "}')
for node in srv1 srv2 srv3; do
case $created_pools in
*" $node "*) echo "Storage pool 'data' already exists on node $node"; continue;;
esac
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
done
@@ -88,7 +311,7 @@ parameters:
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: Immediate
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOF
}
@@ -166,24 +389,3 @@ EOF
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}
@test "Create tenant with isolated mode enabled" {
kubectl -n tenant-root get tenants.apps.cozystack.io test ||
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
name: test
namespace: tenant-root
spec:
etcd: false
host: ""
ingress: false
isolated: true
monitoring: false
resourceQuotas: {}
seaweedfs: false
EOF
kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}

View File

@@ -1,248 +0,0 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------
@test "Required installer assets exist" {
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
exit 1
fi
}
@test "IPv4 forwarding is enabled" {
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is disabled!" >&2
echo >&2
echo "Enable it with:" >&2
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
exit 1
fi
}
@test "Clean previous VMs" {
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
rm -rf srv1 srv2 srv3
}
@test "Prepare networking and masquerading" {
ip link del cozy-br0 2>/dev/null || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip address add 192.168.123.1/24 dev cozy-br0
# Make the masquerading rule idempotent (delete first, then add)
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}
@test "Prepare cloudinit drive for VMs" {
mkdir -p srv1 srv2 srv3
# Generate cloudinit ISOs
for i in 1 2 3; do
echo "hostname: srv${i}" > "srv${i}/meta-data"
cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF
cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
eth0:
dhcp4: false
addresses:
- "192.168.123.1${i}/26"
gateway4: "192.168.123.1"
nameservers:
search: [cluster.local]
addresses: [8.8.8.8]
EOF
( cd "srv${i}" && genisoimage \
-output seed.img \
-volid cidata -rational-rock -joliet \
user-data meta-data network-config )
done
}
@test "Use Talos NoCloud image from assets" {
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
exit 1
fi
rm -f nocloud-amd64.raw
cp _out/assets/nocloud-amd64.raw.xz .
xz --decompress nocloud-amd64.raw.xz
}
@test "Prepare VM disks" {
for i in 1 2 3; do
cp nocloud-amd64.raw srv${i}/system.img
qemu-img resize srv${i}/system.img 50G
qemu-img create srv${i}/data.img 100G
done
}
@test "Create tap devices" {
for i in 1 2 3; do
ip link del cozy-srv${i} 2>/dev/null || true
ip tuntap add dev cozy-srv${i} mode tap
ip link set cozy-srv${i} up
ip link set cozy-srv${i} master cozy-br0
done
}
@test "Boot QEMU VMs" {
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
-drive file=srv${i}/system.img,if=virtio,format=raw \
-drive file=srv${i}/seed.img,if=virtio,format=raw \
-drive file=srv${i}/data.img,if=virtio,format=raw \
-display none -daemonize -pidfile srv${i}/qemu.pid
done
# Give qemu a few seconds to start up networking
sleep 5
}
@test "Wait until Talos API port 50000 is reachable on all machines" {
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Generate Talos cluster configuration" {
# Cluster-wide patches
cat > patch.yaml <<'EOF'
machine:
kubelet:
nodeIP:
validSubnets:
- 192.168.123.0/24
extraConfig:
maxPods: 512
kernel:
modules:
- name: openvswitch
- name: drbd
parameters:
- usermode_helper=disabled
- name: zfs
- name: spl
registries:
mirrors:
docker.io:
endpoints:
- https://dockerio.nexus.lllamnyp.su
cr.fluentbit.io:
endpoints:
- https://fluentbit.nexus.lllamnyp.su
docker-registry3.mariadb.com:
endpoints:
- https://mariadb.nexus.lllamnyp.su
gcr.io:
endpoints:
- https://gcr.nexus.lllamnyp.su
ghcr.io:
endpoints:
- https://ghcr.nexus.lllamnyp.su
quay.io:
endpoints:
- https://quay.nexus.lllamnyp.su
registry.k8s.io:
endpoints:
- https://k8s.nexus.lllamnyp.su
files:
- content: |
[plugins]
[plugins."io.containerd.cri.v1.runtime"]
device_ownership_from_security_context = true
path: /etc/cri/conf.d/20-customization.part
op: create
cluster:
apiServer:
extraArgs:
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
oidc-client-id: "kubernetes"
oidc-username-claim: "preferred_username"
oidc-groups-claim: "groups"
network:
cni:
name: none
dnsDomain: cozy.local
podSubnets:
- 10.244.0.0/16
serviceSubnets:
- 10.96.0.0/16
EOF
# Control-plane-only patches
cat > patch-controlplane.yaml <<'EOF'
machine:
nodeLabels:
node.kubernetes.io/exclude-from-external-load-balancers:
$patch: delete
network:
interfaces:
- interface: eth0
vip:
ip: 192.168.123.10
cluster:
allowSchedulingOnControlPlanes: true
controllerManager:
extraArgs:
bind-address: 0.0.0.0
scheduler:
extraArgs:
bind-address: 0.0.0.0
apiServer:
certSANs:
- 127.0.0.1
proxy:
disabled: true
discovery:
enabled: false
etcd:
advertisedSubnets:
- 192.168.123.0/24
EOF
# Generate secrets once
if [ ! -f secrets.yaml ]; then
talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}
@test "Apply Talos configuration to the node" {
# Apply the configuration to all three nodes
for node in 11 12 13; do
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
done
# Wait for Talos services to come up again
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Bootstrap Talos cluster" {
# Bootstrap etcd on the first node
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait until etcd is healthy
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
# Retrieve kubeconfig
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
# Wait until all three nodes register in Kubernetes
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}

View File

@@ -1,139 +0,0 @@
package controller
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"sort"
"time"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
type CozystackConfigReconciler struct {
client.Client
Scheme *runtime.Scheme
}
var configMapNames = []string{"cozystack", "cozystack-branding", "cozystack-scheduling"}
const configMapNamespace = "cozy-system"
const digestAnnotation = "cozystack.io/cozy-config-digest"
const forceReconcileKey = "reconcile.fluxcd.io/forceAt"
const requestedAt = "reconcile.fluxcd.io/requestedAt"
func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)
digest, err := r.computeDigest(ctx)
if err != nil {
log.Error(err, "failed to compute config digest")
return ctrl.Result{}, nil
}
var helmList helmv2.HelmReleaseList
if err := r.List(ctx, &helmList); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to list HelmReleases: %w", err)
}
now := time.Now().Format(time.RFC3339Nano)
updated := 0
for _, hr := range helmList.Items {
isSystemApp := hr.Labels["cozystack.io/system-app"] == "true"
isTenantRoot := hr.Namespace == "tenant-root" && hr.Name == "tenant-root"
if !isSystemApp && !isTenantRoot {
continue
}
patchTarget := hr.DeepCopy()
// Initialize annotations on the patch target; writing to a nil map would panic.
if patchTarget.Annotations == nil {
patchTarget.Annotations = map[string]string{}
}
if hr.Annotations[digestAnnotation] == digest {
continue
}
patchTarget.Annotations[digestAnnotation] = digest
patchTarget.Annotations[forceReconcileKey] = now
patchTarget.Annotations[requestedAt] = now
patch := client.MergeFrom(hr.DeepCopy())
if err := r.Patch(ctx, patchTarget, patch); err != nil {
log.Error(err, "failed to patch HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
continue
}
updated++
log.Info("patched HelmRelease with new config digest", "name", hr.Name, "namespace", hr.Namespace)
}
log.Info("finished reconciliation", "updatedHelmReleases", updated)
return ctrl.Result{}, nil
}
func (r *CozystackConfigReconciler) computeDigest(ctx context.Context) (string, error) {
hash := sha256.New()
for _, name := range configMapNames {
var cm corev1.ConfigMap
err := r.Get(ctx, client.ObjectKey{Namespace: configMapNamespace, Name: name}, &cm)
if err != nil {
if kerrors.IsNotFound(err) {
continue // ignore missing
}
return "", err
}
// Sort keys for consistent hashing
var keys []string
for k := range cm.Data {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := cm.Data[k]
fmt.Fprintf(hash, "%s:%s=%s\n", name, k, v)
}
}
return hex.EncodeToString(hash.Sum(nil)), nil
}
func (r *CozystackConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
WithEventFilter(predicate.Funcs{
UpdateFunc: func(e event.UpdateEvent) bool {
cm, ok := e.ObjectNew.(*corev1.ConfigMap)
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
},
CreateFunc: func(e event.CreateEvent) bool {
cm, ok := e.Object.(*corev1.ConfigMap)
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
},
DeleteFunc: func(e event.DeleteEvent) bool {
cm, ok := e.Object.(*corev1.ConfigMap)
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
},
}).
For(&corev1.ConfigMap{}).
Complete(r)
}
func contains(slice []string, val string) bool {
for _, s := range slice {
if s == val {
return true
}
}
return false
}
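For context, the two annotations patched above are the same keys Flux watches for manual reconcile requests; the hand-run equivalent for a single HelmRelease would be roughly as follows (the `tenant-root` target is taken from the controller's own matching logic):

```bash
# Force a Flux reconcile by bumping the annotations the controller patched
now="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
kubectl -n tenant-root annotate helmrelease tenant-root --overwrite \
  "reconcile.fluxcd.io/forceAt=${now}" \
  "reconcile.fluxcd.io/requestedAt=${now}"
```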

View File

@@ -3,7 +3,6 @@ package controller
import (
"context"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -16,10 +15,6 @@ import (
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)
const (
deletionRequeueDelay = 30 * time.Second
)
// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadReconciler struct {
client.Client
@@ -57,9 +52,6 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
// found object, nothing to do
if err == nil {
if !t.GetDeletionTimestamp().IsZero() {
return ctrl.Result{RequeueAfter: deletionRequeueDelay}, nil
}
return ctrl.Result{}, nil
}

View File

@@ -248,24 +248,15 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-%s", pod.Name),
Namespace: pod.Namespace,
Labels: map[string]string{},
},
}
metaLabels := r.getWorkloadMetadata(&pod)
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), monitor)
// Copy labels from the Pod if needed
for k, v := range pod.Labels {
workload.Labels[k] = v
}
// Add workload meta to labels
for k, v := range metaLabels {
workload.Labels[k] = v
}
workload.Labels = pod.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
@@ -442,12 +433,3 @@ func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.
return requests
}
}
func (r *WorkloadMonitorReconciler) getWorkloadMetadata(obj client.Object) map[string]string {
labels := make(map[string]string)
annotations := obj.GetAnnotations()
if instanceType, ok := annotations["kubevirt.io/cluster-instancetype-name"]; ok {
labels["workloads.cozystack.io/kubevirt-vmi-instance-type"] = instanceType
}
return labels
}

View File

@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.2.0"
appVersion: "0.1.0"

View File

@@ -1,4 +1,4 @@
include ../../../scripts/package.mk
generate:
readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
readme-generator -v values.yaml -s values.schema.json -r README.md

View File

@@ -1 +0,0 @@
../../../library/cozy-lib

View File

@@ -18,14 +18,3 @@ rules:
resourceNames:
- {{ .Release.Name }}-ui
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,5 +1,5 @@
{
"properties": {},
"title": "Chart Values",
"type": "object"
"type": "object",
"properties": {}
}

View File

@@ -16,10 +16,15 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.1
version: 0.9.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "24.9.2"
dependencies:
- name: cozy-lib
version: 0.1.0
repository: "http://cozystack.cozy-system.svc/repos/library"

View File

@@ -1,12 +1,10 @@
CLICKHOUSE_BACKUP_TAG = $(shell awk '$$0 ~ /^version:/ {print $$2}' Chart.yaml)
PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]
CLICKHOUSE_BACKUP_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
generate:
readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
readme-generator -v values.yaml -s values.schema.json -r README.md
image:
docker buildx build images/clickhouse-backup \

View File

@@ -1,80 +1,50 @@
# Managed ClickHouse Service
# Managed Clickhouse Service
ClickHouse is an open source high-performance and column-oriented SQL database management system (DBMS).
It is used for online analytical processing (OLAP).
### How to restore backup:
### How to restore backup from S3
find snapshot:
```
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
```
1. Find the snapshot:
restore:
```
restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
```
```bash
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
```
2. Restore it:
```bash
restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
```
For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1).
more details:
- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
## Parameters
### Common parameters
| Name | Description | Value |
| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `replicas` | Number of Clickhouse replicas | `2` |
| `shards` | Number of Clickhouse shards | `1` |
| `resources` | Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
| `size` | Persistent Volume Claim size, available for application data | `10Gi` |
| `storageClass` | StorageClass used to store the application data | `""` |
| Name | Description | Value |
| ---------------- | ----------------------------------- | ------ |
| `size` | Persistent Volume size | `10Gi` |
| `logStorageSize` | Persistent Volume for logs size | `2Gi` |
| `shards`         | Number of Clickhouse shards         | `1`    |
| `replicas`       | Number of Clickhouse replicas       | `2`    |
| `storageClass` | StorageClass used to store the data | `""` |
| `logTTL`         | TTL (expiration time) for query_log and query_thread_log | `15` |
### Application-specific parameters
### Configuration parameters
| Name | Description | Value |
| ---------------- | -------------------------------------------------------- | ----- |
| `logStorageSize` | Size of Persistent Volume for logs | `2Gi` |
| `logTTL` | TTL (expiration time) for query_log and query_thread_log | `15` |
| `users` | Users configuration | `{}` |
| Name | Description | Value |
| ------- | ------------------- | ----- |
| `users` | Users configuration | `{}` |
### Backup parameters
| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | Retention strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | Password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
## Parameter examples and reference
### resources and resourcesPreset
`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.
```yaml
resources:
cpu: 4000m
memory: 4Gi
```
`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.
| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled`         | Enable periodic backups                                                                                                                                                                                              | `false`                                                 |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
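Tying the tables together, a minimal manifest exercising these parameters might look like this (name, namespace, and values are illustrative only):

```bash
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
  name: example
  namespace: tenant-root
spec:
  size: 10Gi
  logStorageSize: 2Gi
  shards: 1
  replicas: 2
  storageClass: ""
  logTTL: 15
  resources: {}
  resourcesPreset: "small"
EOF
```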

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.11.1@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/cozystack/cozystack/clickhouse-backup:0.9.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205

View File

@@ -1,5 +1,3 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
{{- $passwords := dict }}
{{- $users := .Values.users }}
@@ -34,7 +32,7 @@ kind: "ClickHouseInstallation"
metadata:
name: "{{ .Release.Name }}"
spec:
namespaceDomainPattern: "%s.svc.{{ $clusterDomain }}"
namespaceDomainPattern: "%s.svc.cozy.local"
defaults:
templates:
dataVolumeClaimTemplate: data-volume-template
@@ -94,9 +92,6 @@ spec:
templates:
volumeClaimTemplates:
- name: data-volume-template
metadata:
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
accessModes:
- ReadWriteOnce
@@ -104,9 +99,6 @@ spec:
requests:
storage: {{ .Values.size }}
- name: log-volume-template
metadata:
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
accessModes:
- ReadWriteOnce
@@ -115,9 +107,6 @@ spec:
storage: {{ .Values.logStorageSize }}
podTemplates:
- name: clickhouse-per-host
metadata:
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
affinity:
podAntiAffinity:
@@ -132,7 +121,11 @@ spec:
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:24.9.2.42
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 16 }}
{{- if .Values.resources }}
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 16 }}
{{- else if ne .Values.resourcesPreset "none" }}
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 16 }}
{{- end }}
volumeMounts:
- name: data-volume-template
mountPath: /var/lib/clickhouse
@@ -140,9 +133,6 @@ spec:
mountPath: /var/log/clickhouse-server
serviceTemplates:
- name: svc-template
metadata:
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
generateName: chendpoint-{chi}
spec:
ports:

View File

@@ -24,14 +24,3 @@ rules:
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io

View File

@@ -9,5 +9,5 @@ spec:
kind: clickhouse
type: clickhouse
selector:
app.kubernetes.io/instance: {{ $.Release.Name }}
clickhouse.altinity.com/chi: {{ $.Release.Name }}
version: {{ $.Chart.Version }}

View File

@@ -1,100 +1,91 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"backup": {
"properties": {
"cleanupStrategy": {
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m",
"description": "Retention strategy for cleaning up old backups",
"type": "string"
},
"enabled": {
"default": false,
"description": "Enable periodic backups",
"type": "boolean"
},
"resticPassword": {
"default": "ChaXoveekoh6eigh4siesheeda2quai0",
"description": "Password for Restic backup encryption",
"type": "string"
},
"s3AccessKey": {
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu",
"description": "Access key for S3, used for authentication",
"type": "string"
},
"s3Bucket": {
"default": "s3.example.org/clickhouse-backups",
"description": "S3 bucket used for storing backups",
"type": "string"
},
"s3Region": {
"default": "us-east-1",
"description": "AWS S3 region where backups are stored",
"type": "string"
},
"s3SecretKey": {
"default": "ju3eum4dekeich9ahM1te8waeGai0oog",
"description": "Secret key for S3, used for authentication",
"type": "string"
},
"schedule": {
"default": "0 2 * * *",
"description": "Cron schedule for automated backups",
"type": "string"
}
},
"type": "object"
"size": {
"type": "string",
"description": "Persistent Volume size",
"default": "10Gi"
},
"logStorageSize": {
"default": "2Gi",
"description": "Size of Persistent Volume for logs",
"type": "string"
},
"logTTL": {
"default": 15,
"description": "TTL (expiration time) for query_log and query_thread_log",
"type": "number"
},
"replicas": {
"default": 2,
"description": "Number of Clickhouse replicas",
"type": "number"
},
"resources": {
"default": {},
"description": "Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object"
},
"resourcesPreset": {
"default": "small",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.",
"type": "string",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
"description": "Persistent Volume for logs size",
"default": "2Gi"
},
"shards": {
"default": 1,
"description": "Number of Clickhouse shards",
"type": "number"
"type": "number",
"description": "Number of Clickhouse replicas",
"default": 1
},
"size": {
"default": "10Gi",
"description": "Persistent Volume Claim size, available for application data",
"type": "string"
"replicas": {
"type": "number",
"description": "Number of Clickhouse shards",
"default": 2
},
"storageClass": {
"default": "",
"description": "StorageClass used to store the application data",
"type": "string"
"type": "string",
"description": "StorageClass used to store the data",
"default": ""
},
"logTTL": {
"type": "number",
"description": "for query_log and query_thread_log",
"default": 15
},
"backup": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable pereiodic backups",
"default": false
},
"s3Region": {
"type": "string",
"description": "The AWS S3 region where backups are stored",
"default": "us-east-1"
},
"s3Bucket": {
"type": "string",
"description": "The S3 bucket used for storing backups",
"default": "s3.example.org/clickhouse-backups"
},
"schedule": {
"type": "string",
"description": "Cron schedule for automated backups",
"default": "0 2 * * *"
},
"cleanupStrategy": {
"type": "string",
"description": "The strategy for cleaning up old backups",
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
},
"s3AccessKey": {
"type": "string",
"description": "The access key for S3, used for authentication",
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
},
"s3SecretKey": {
"type": "string",
"description": "The secret key for S3, used for authentication",
"default": "ju3eum4dekeich9ahM1te8waeGai0oog"
},
"resticPassword": {
"type": "string",
"description": "The password for Restic backup encryption",
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
},
"title": "Chart Values",
"type": "object"
}
}
}

View File

@@ -1,29 +1,21 @@
## @section Common parameters
##
## @param replicas Number of Clickhouse replicas
replicas: 2
## @param shards Number of Clickhouse shards
shards: 1
## @param resources Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "small"
## @param size Persistent Volume Claim size, available for application data
## @param size Persistent Volume size
## @param logStorageSize Persistent Volume for logs size
## @param shards Number of Clickhouse shards
## @param replicas Number of Clickhouse replicas
## @param storageClass StorageClass used to store the data
## @param logTTL TTL (expiration time) for query_log and query_thread_log
##
size: 10Gi
## @param storageClass StorageClass used to store the application data
storageClass: ""
## @section Application-specific parameters
##
## @param logStorageSize Size of Persistent Volume for logs
logStorageSize: 2Gi
## @param logTTL TTL (expiration time) for query_log and query_thread_log
shards: 1
replicas: 2
storageClass: ""
logTTL: 15
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
@@ -35,17 +27,16 @@ logTTL: 15
##
users: {}
## @section Backup parameters
## @param backup.enabled Enable periodic backups
## @param backup.s3Region AWS S3 region where backups are stored
## @param backup.s3Bucket S3 bucket used for storing backups
## @param backup.enabled Enable periodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.cleanupStrategy Retention strategy for cleaning up old backups
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
## @param backup.resticPassword Password for Restic backup encryption
## @param backup.cleanupStrategy The strategy for cleaning up old backups
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
## @param backup.resticPassword The password for Restic backup encryption
backup:
enabled: false
s3Region: us-east-1
@@ -56,3 +47,15 @@ backup:
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
## @param resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"

View File

@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.0
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 2.4.0
appVersion: "1.24.0"

View File

@@ -1,13 +1,4 @@
include ../../../scripts/package.mk
PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]
generate:
readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/FerretDB/FerretDB | awk -F'[/^]' '{sub("^v", "", $$3)} END{print $$3}') && \
pgtag=$$(skopeo list-tags docker://ghcr.io/ferretdb/postgres-documentdb | jq -r --arg tag "$$tag" '.Tags[] | select(endswith("ferretdb-" + $$tag))' | sort -V | tail -n1) && \
sed -i "s|\(imageName: ghcr.io/ferretdb/postgres-documentdb:\).*|\1$$pgtag|" templates/postgres.yaml && \
sed -i "s|\(image: ghcr.io/ferretdb/ferretdb:\).*|\1$$tag|" templates/ferretdb.yaml && \
sed -i "s|\(appVersion: \).*|\1$$tag|" Chart.yaml
readme-generator -v values.yaml -s values.schema.json -r README.md

View File

@@ -1,72 +1,37 @@
# Managed FerretDB Service
FerretDB is an open source MongoDB alternative.
It translates MongoDB wire protocol queries to SQL and can be used as a direct replacement for MongoDB 5.0+.
Internally, the FerretDB service is backed by Postgres.
## Parameters
### Common parameters
| Name | Description | Value |
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `replicas` | Number of replicas | `2` |
| `resources` | Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `external` | Enable external access from outside the cluster | `false` |
| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `replicas` | Number of Postgres replicas | `2` |
| `storageClass` | StorageClass used to store the data | `""` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed. | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances). | `0` |
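Taken together, a minimal values sketch covering the common parameters (the values shown are the chart defaults):
```yaml
replicas: 2
size: 10Gi
storageClass: ""
external: false
quorum:
  minSyncReplicas: 0
  maxSyncReplicas: 0
```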
### Application-specific parameters
### Configuration parameters
| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------- | ----- |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas) | `0` |
| `users` | Users configuration | `{}` |
| Name | Description | Value |
| ------- | ------------------- | ----- |
| `users` | Users configuration | `{}` |
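A hypothetical `users` map might look like the following (per-user fields follow the chart's init script, which reads an optional `password` and `replication` flag and generates a random password when none is set):
```yaml
users:
  alice:
    password: "s3cr3t"    # stored in the <release>-credentials Secret
  replicator:
    replication: true     # password is generated and persisted if omitted
```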
### Backup parameters
| Name | Description | Value |
| ------------------------ | ---------------------------------------------------------- | ----------------------------------- |
| `backup.enabled` | Enable regular backups | `false` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * * *` |
| `backup.retentionPolicy` | Retention policy | `30d` |
| `backup.destinationPath` | Path to store the backup (i.e. s3://bucket/path/to/folder) | `s3://bucket/path/to/folder/` |
| `backup.endpointURL` | S3 Endpoint used to upload data to the cloud | `http://minio-gateway-service:9000` |
| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled`         | Enable periodic backups                                                                                                                                                                                              | `false`                                                |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
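As a sketch, enabling periodic backups with these parameters might look like this (bucket, keys, and password are placeholders; the backup job creates one Restic repository per database under the bucket):
```yaml
backup:
  enabled: true
  s3Region: us-east-1
  s3Bucket: s3.example.org/postgres-backups
  schedule: "0 2 * * *"                     # daily at 02:00
  cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
  s3AccessKey: <access-key>
  s3SecretKey: <secret-key>
  resticPassword: <encryption-password>
```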
### Bootstrap (recovery) parameters
| Name | Description | Value |
| ------------------------ | -------------------------------------------------------------------------------------------------------------------- | ------- |
| `bootstrap.enabled` | Restore database cluster from a backup | `false` |
| `bootstrap.recoveryTime` | Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, the latest backup is restored | `""`    |
| `bootstrap.oldName` | Name of database cluster before deleting | `""` |
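Putting these together, a hypothetical point-in-time recovery from a previously deleted cluster could be configured as:
```yaml
bootstrap:
  enabled: true
  oldName: mydb                                  # name the cluster had before deletion
  recoveryTime: "2020-11-26 15:22:00.00000+00"   # optional; leave empty to restore the latest backup
```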
## Parameter examples and reference
### resources and resourcesPreset
`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.
```yaml
resources:
cpu: 4000m
memory: 4Gi
```
`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.
| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
View File
@@ -1 +0,0 @@
../../../library/cozy-lib
View File
@@ -0,0 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.12.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
View File
@@ -0,0 +1,99 @@
{{- if .Values.backup.enabled }}
{{ $image := .Files.Get "images/backup.json" | fromJson }}
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ .Release.Name }}-backup
spec:
schedule: "{{ .Values.backup.schedule }}"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 2
template:
spec:
restartPolicy: OnFailure
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: pgdump
image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
command:
- /bin/sh
- /scripts/backup.sh
env:
- name: REPO_PREFIX
value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
- name: CLEANUP_STRATEGY
value: {{ required "cleanupStrategy is not specified!" .Values.backup.cleanupStrategy | quote }}
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3AccessKey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3SecretKey
- name: AWS_DEFAULT_REGION
value: {{ .Values.backup.s3Region }}
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: resticPassword
volumeMounts:
- mountPath: /scripts
name: scripts
- mountPath: /tmp
name: tmp
- mountPath: /.cache
name: cache
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumes:
- name: scripts
secret:
secretName: {{ .Release.Name }}-backup-script
- name: tmp
emptyDir: {}
- name: cache
emptyDir: {}
securityContext:
runAsNonRoot: true
runAsUser: 9000
runAsGroup: 9000
seccompProfile:
type: RuntimeDefault
{{- end }}
View File
@@ -0,0 +1,50 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup-script
stringData:
backup.sh: |
#!/bin/sh
set -e
set -o pipefail
JOB_ID="job-$(uuidgen|cut -f1 -d-)"
DB_LIST=$(psql -Atq -c 'SELECT datname FROM pg_catalog.pg_database;' | grep -v '^\(postgres\|app\|template.*\)$')
DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle list
echo "Job ID: $JOB_ID"
echo "Target repo: $REPO_PREFIX"
echo "Cleanup strategy: $CLEANUP_STRATEGY"
echo "Start backup for:"
echo "$DB_LIST"
echo
echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
pg_dump -Z0 -Ft -d "$db" | \
restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.tar
restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
)
done
echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
echo
echo "Run cleanup:"
echo
echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
restic prune -r "s3:${REPO_PREFIX}/$db"
)
done
echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
{{- end }}
View File
@@ -0,0 +1,11 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup
stringData:
s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey }}
s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey }}
resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword }}
{{- end }}
View File
@@ -1,12 +0,0 @@
{{- if .Values.backup.enabled }}
---
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: {{ .Release.Name }}-postgres
spec:
schedule: {{ .Values.backup.schedule | quote }}
backupOwnerReference: self
cluster:
name: {{ .Release.Name }}-postgres
{{- end }}
View File
@@ -24,14 +24,3 @@ rules:
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io
View File
@@ -2,8 +2,6 @@ apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
View File
@@ -12,18 +12,15 @@ spec:
metadata:
labels:
app: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
containers:
- name: ferretdb
image: ghcr.io/ferretdb/ferretdb:2.4.0
image: ghcr.io/ferretdb/ferretdb:1.24.0
ports:
- containerPort: 27017
env:
- name: POSTGRESQL_PASSWORD
- name: FERRETDB_POSTGRESQL_URL
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: FERRETDB_POSTGRESQL_URL
value: "postgresql://postgres:$(POSTGRESQL_PASSWORD)@{{ .Release.Name }}-postgres-rw:5432/postgres"
name: {{ .Release.Name }}-postgres-app
key: uri
View File
@@ -0,0 +1,66 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-init-job
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
metadata:
name: {{ .Release.Name }}-init-job
annotations:
checksum/config: {{ include (print $.Template.BasePath "/init-script.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: postgres
image: ghcr.io/cloudnative-pg/postgresql:15.3
command:
- bash
- /scripts/init.sh
env:
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/secret
name: secret
- mountPath: /scripts
name: scripts
securityContext:
fsGroup: 26
runAsGroup: 26
runAsNonRoot: true
runAsUser: 26
seccompProfile:
type: RuntimeDefault
volumes:
- name: secret
secret:
secretName: {{ .Release.Name }}-postgres-superuser
- name: scripts
secret:
secretName: {{ .Release.Name }}-init-script
View File
@@ -0,0 +1,131 @@
{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
{{- $passwords := dict }}
{{- with (index $existingSecret "data") }}
{{- range $k, $v := . }}
{{- $_ := set $passwords $k (b64dec $v) }}
{{- end }}
{{- end }}
{{- range $user, $u := .Values.users }}
{{- if $u.password }}
{{- $_ := set $passwords $user $u.password }}
{{- else if not (index $passwords $user) }}
{{- $_ := set $passwords $user (randAlphaNum 16) }}
{{- end }}
{{- end }}
{{- if .Values.users }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-credentials
stringData:
{{- range $user, $u := .Values.users }}
{{ quote $user }}: {{ quote (index $passwords $user) }}
{{- end }}
{{- end }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-init-script
stringData:
init.sh: |
#!/bin/bash
set -e
until pg_isready ; do sleep 5; done
echo "== create users"
{{- if .Values.users }}
psql -v ON_ERROR_STOP=1 <<\EOT
{{- range $user, $u := .Values.users }}
SELECT 'CREATE ROLE {{ $user }} LOGIN INHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $user }}')\gexec
ALTER ROLE {{ $user }} WITH PASSWORD '{{ index $passwords $user }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
COMMENT ON ROLE {{ $user }} IS 'user managed by helm';
{{- end }}
EOT
{{- end }}
echo "== delete users"
MANAGED_USERS=$(echo '\du+' | psql | awk -F'|' '$4 == " user managed by helm" {print $1}' | awk NF=NF RS= OFS=' ')
DEFINED_USERS="{{ join " " (keys .Values.users) }}"
DELETE_USERS=$(for user in $MANAGED_USERS; do case " $DEFINED_USERS " in *" $user "*) :;; *) echo $user;; esac; done)
echo "users to delete: $DELETE_USERS"
for user in $DELETE_USERS; do
# https://stackoverflow.com/a/51257346/2931267
psql -v ON_ERROR_STOP=1 --echo-all <<EOT
REASSIGN OWNED BY $user TO postgres;
DROP OWNED BY $user;
DROP USER $user;
EOT
done
echo "== create roles"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
SELECT 'CREATE ROLE app_admin NOINHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'app_admin')\gexec
COMMENT ON ROLE app_admin IS 'role managed by helm';
EOT
echo "== grant privileges on databases to roles"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
ALTER DATABASE app OWNER TO app_admin;
DO $$
DECLARE
schema_record record;
BEGIN
-- Loop over all schemas
FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
-- Changing Schema Ownership
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, 'app_admin');
-- Add rights for the admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL SEQUENCES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL FUNCTIONS IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', schema_record.schema_name, 'app_admin');
END LOOP;
END$$;
EOT
echo "== setup event trigger for schema creation"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
CREATE OR REPLACE FUNCTION auto_grant_schema_privileges()
RETURNS event_trigger LANGUAGE plpgsql AS $$
DECLARE
obj record;
BEGIN
FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE command_tag = 'CREATE SCHEMA' LOOP
-- Set owner for schema
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', obj.object_identity, 'app_admin');
-- Set privileges for admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', obj.object_identity, 'app_admin');
END LOOP;
END;
$$;
DROP EVENT TRIGGER IF EXISTS trigger_auto_grant;
CREATE EVENT TRIGGER trigger_auto_grant ON ddl_command_end
WHEN TAG IN ('CREATE SCHEMA')
EXECUTE PROCEDURE auto_grant_schema_privileges();
EOT
echo "== assign roles to users"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
GRANT app_admin TO app;
{{- range $user, $u := $.Values.users }}
GRANT app_admin TO {{ $user }};
{{- end }}
EOT
View File
@@ -5,50 +5,6 @@ metadata:
name: {{ .Release.Name }}-postgres
spec:
instances: {{ .Values.replicas }}
{{- if .Values.backup.enabled }}
backup:
barmanObjectStore:
destinationPath: {{ .Values.backup.destinationPath }}
endpointURL: {{ .Values.backup.endpointURL }}
s3Credentials:
accessKeyId:
name: {{ .Release.Name }}-s3-creds
key: AWS_ACCESS_KEY_ID
secretAccessKey:
name: {{ .Release.Name }}-s3-creds
key: AWS_SECRET_ACCESS_KEY
retentionPolicy: {{ .Values.backup.retentionPolicy }}
{{- end }}
bootstrap:
initdb:
postInitSQL:
- 'CREATE EXTENSION IF NOT EXISTS documentdb CASCADE;'
{{- if .Values.bootstrap.enabled }}
recovery:
source: {{ .Values.bootstrap.oldName }}
{{- if .Values.bootstrap.recoveryTime }}
recoveryTarget:
targetTime: {{ .Values.bootstrap.recoveryTime }}
{{- end }}
{{- end }}
{{- if .Values.bootstrap.enabled }}
externalClusters:
- name: {{ .Values.bootstrap.oldName }}
barmanObjectStore:
destinationPath: {{ .Values.backup.destinationPath }}
endpointURL: {{ .Values.backup.endpointURL }}
s3Credentials:
accessKeyId:
name: {{ .Release.Name }}-s3-creds
key: AWS_ACCESS_KEY_ID
secretAccessKey:
name: {{ .Release.Name }}-s3-creds
key: AWS_SECRET_ACCESS_KEY
{{- end }}
imageName: ghcr.io/ferretdb/postgres-documentdb:17-0.105.0-ferretdb-2.4.0
postgresUID: 999
postgresGID: 999
enableSuperuserAccess: true
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
{{- if $configMap }}
@@ -62,21 +18,14 @@ spec:
{{- end }}
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 4 }}
{{- else if ne .Values.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
{{- end }}
monitoring:
enablePodMonitor: true
postgresql:
shared_preload_libraries:
- pg_cron
- pg_documentdb_core
- pg_documentdb
parameters:
cron.database_name: 'postgres'
pg_hba:
- host postgres postgres 127.0.0.1/32 trust
- host postgres postgres ::1/128 trust
storage:
size: {{ required ".Values.size is required" .Values.size }}
{{- with .Values.storageClass }}
@@ -86,7 +35,6 @@ spec:
inheritedMetadata:
labels:
policy.cozystack.io/allow-to-apiserver: "true"
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.users }}
managed:
@@ -97,6 +45,8 @@ spec:
passwordSecret:
name: {{ printf "%s-user-%s" $.Release.Name $user }}
login: true
inRoles:
- app
{{- end }}
{{- end }}
View File
@@ -9,5 +9,5 @@ spec:
kind: ferretdb
type: ferretdb
selector:
app.kubernetes.io/instance: {{ $.Release.Name }}
app: {{ $.Release.Name }}
version: {{ $.Chart.Version }}
View File
@@ -1,120 +1,96 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"backup": {
"properties": {
"destinationPath": {
"default": "s3://bucket/path/to/folder/",
"description": "Path to store the backup (i.e. s3://bucket/path/to/folder)",
"type": "string"
},
"enabled": {
"default": false,
"description": "Enable regular backups",
"type": "boolean"
},
"endpointURL": {
"default": "http://minio-gateway-service:9000",
"description": "S3 Endpoint used to upload data to the cloud",
"type": "string"
},
"retentionPolicy": {
"default": "30d",
"description": "Retention policy",
"type": "string"
},
"s3AccessKey": {
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu",
"description": "Access key for S3, used for authentication",
"type": "string"
},
"s3SecretKey": {
"default": "ju3eum4dekeich9ahM1te8waeGai0oog",
"description": "Secret key for S3, used for authentication",
"type": "string"
},
"schedule": {
"default": "0 2 * * * *",
"description": "Cron schedule for automated backups",
"type": "string"
}
},
"type": "object"
},
"bootstrap": {
"properties": {
"enabled": {
"default": false,
"description": "Restore database cluster from a backup",
"type": "boolean"
},
"oldName": {
"default": "",
"description": "Name of database cluster before deleting",
"type": "string"
},
"recoveryTime": {
"default": "",
"description": "Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest",
"type": "string"
}
},
"type": "object"
},
"external": {
"default": false,
"type": "boolean",
"description": "Enable external access from outside the cluster",
"type": "boolean"
},
"quorum": {
"properties": {
"maxSyncReplicas": {
"default": 0,
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)",
"type": "number"
},
"minSyncReplicas": {
"default": 0,
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed",
"type": "number"
}
},
"type": "object"
},
"replicas": {
"default": 2,
"description": "Number of replicas",
"type": "number"
},
"resources": {
"default": {},
"description": "Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object"
},
"resourcesPreset": {
"default": "micro",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"type": "string",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
"default": false
},
"size": {
"default": "10Gi",
"type": "string",
"description": "Persistent Volume size",
"type": "string"
"default": "10Gi"
},
"replicas": {
"type": "number",
"description": "Number of Postgres replicas",
"default": 2
},
"storageClass": {
"default": "",
"type": "string",
"description": "StorageClass used to store the data",
"type": "string"
"default": ""
},
"quorum": {
"type": "object",
"properties": {
"minSyncReplicas": {
"type": "number",
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.",
"default": 0
},
"maxSyncReplicas": {
"type": "number",
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).",
"default": 0
}
}
},
"backup": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable pereiodic backups",
"default": false
},
"s3Region": {
"type": "string",
"description": "The AWS S3 region where backups are stored",
"default": "us-east-1"
},
"s3Bucket": {
"type": "string",
"description": "The S3 bucket used for storing backups",
"default": "s3.example.org/postgres-backups"
},
"schedule": {
"type": "string",
"description": "Cron schedule for automated backups",
"default": "0 2 * * *"
},
"cleanupStrategy": {
"type": "string",
"description": "The strategy for cleaning up old backups",
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
},
"s3AccessKey": {
"type": "string",
"description": "The access key for S3, used for authentication",
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
},
"s3SecretKey": {
"type": "string",
"description": "The secret key for S3, used for authentication",
"default": "ju3eum4dekeich9ahM1te8waeGai0oog"
},
"resticPassword": {
"type": "string",
"description": "The password for Restic backup encryption",
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
},
"title": "Chart Values",
"type": "object"
}
}
}
View File
@@ -1,30 +1,24 @@
## @section Common parameters
##
## @param replicas Number of replicas
replicas: 2
## @param resources Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "micro"
## @param size Persistent Volume size
size: 10Gi
## @param storageClass StorageClass used to store the data
storageClass: ""
## @param external Enable external access from outside the cluster
external: false
## @section Application-specific parameters
## @param size Persistent Volume size
## @param replicas Number of Postgres replicas
## @param storageClass StorageClass used to store the data
##
external: false
size: 10Gi
replicas: 2
storageClass: ""
## Configuration for the quorum-based synchronous replication
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).
quorum:
minSyncReplicas: 0
maxSyncReplicas: 0
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
@@ -35,36 +29,35 @@ quorum:
##
users: {}
## @section Backup parameters
##
## @param backup.enabled Enable regular backups
## @param backup.enabled Enable periodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.retentionPolicy Retention policy
## @param backup.destinationPath Path to store the backup (i.e. s3://bucket/path/to/folder)
## @param backup.endpointURL S3 Endpoint used to upload data to the cloud
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
## @param backup.cleanupStrategy The strategy for cleaning up old backups
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
## @param backup.resticPassword The password for Restic backup encryption
backup:
enabled: false
retentionPolicy: 30d
destinationPath: s3://bucket/path/to/folder/
endpointURL: http://minio-gateway-service:9000
schedule: "0 2 * * * *"
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
## @section Bootstrap (recovery) parameters
##
## @param bootstrap.enabled Restore database cluster from a backup
## @param bootstrap.recoveryTime Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, the latest backup is restored
## @param bootstrap.oldName Name of database cluster before deleting
##
bootstrap:
enabled: false
# example: 2020-11-26 15:22:00.00000+00
recoveryTime: ""
oldName: ""
## @param resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
View File
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.1
version: 0.5.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
View File
@@ -1,5 +1,4 @@
NGINX_CACHE_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]
include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
@@ -23,9 +22,7 @@ image-nginx:
rm -f images/nginx-cache.json
generate:
readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.haproxy.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
yq -i -o json --indent 4 '.properties.nginx.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
readme-generator -v values.yaml -s values.schema.json -r README.md
update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/chrislim2888/IP2Location-C-Library | awk -F'[/^]' 'END{print $$3}') && \
View File
@@ -1,9 +1,8 @@
# Managed Nginx-based HTTP Cache Service
# Managed Nginx Caching Service
The Nginx-based HTTP caching service is designed to optimize web traffic and enhance web application performance.
This service combines custom-built Nginx instances with HAProxy for efficient caching and load balancing.
The Nginx Caching Service is designed to optimize web traffic and enhance web application performance. This service combines custom-built Nginx instances with HAProxy for efficient caching and load balancing.
## Deployment information
## Deployment information
The Nginx instances include the following modules and features:
@@ -54,77 +53,27 @@ The deployment architecture is illustrated in the diagram below:
## Known issues
- VTS module shows wrong upstream response time, [github.com/vozlt/nginx-module-vts#198](https://github.com/vozlt/nginx-module-vts/issues/198)
VTS module shows wrong upstream response time
- https://github.com/vozlt/nginx-module-vts/issues/198
## Parameters
### Common parameters
| Name | Description | Value |
| -------------- | ----------------------------------------------- | ------- |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `external` | Enable external access from outside the cluster | `false` |
| Name | Description | Value |
| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
| `haproxy.resources` | Resources | `{}` |
| `haproxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `nginx.resources` | Resources | `{}` |
| `nginx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
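As a sketch, the HAProxy and Nginx tiers can be sized independently (values mirror the defaults above; explicit `resources` override the preset):
```yaml
haproxy:
  replicas: 2
  resourcesPreset: nano
nginx:
  replicas: 2
  resources:
    limits:
      cpu: 4000m
      memory: 4Gi
    requests:
      cpu: 100m
      memory: 512Mi
```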
### Application-specific parameters
### Configuration parameters
| Name | Description | Value |
| ----------- | ----------------------- | ----- |
| `endpoints` | Endpoints configuration | `[]` |
### HAProxy parameters
| Name | Description | Value |
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------ |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `haproxy.resources` | Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `haproxy.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
### Nginx parameters
| Name | Description | Value |
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ------ |
| `nginx.replicas` | Number of Nginx replicas | `2` |
| `nginx.resources` | Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `nginx.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
## Parameter examples and reference
### resources and resourcesPreset
`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.
```yaml
resources:
cpu: 4000m
memory: 4Gi
```
`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.
| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
### endpoints
`endpoints` is a flat list of IP addresses:
```yaml
endpoints:
- 10.100.3.1:80
- 10.100.3.11:80
- 10.100.3.2:80
- 10.100.3.12:80
- 10.100.3.3:80
- 10.100.3.13:80
```
View File
@@ -1 +0,0 @@
../../../library/cozy-lib
View File
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.6.1@sha256:e0a07082bb6fc6aeaae2315f335386f1705a646c72f9e0af512aebbca5cb2b15
ghcr.io/cozystack/cozystack/nginx-cache:0.5.0@sha256:99cd04f09f80eb0c60cc0b2f6bc8180ada7ada00cb594606447674953dfa1b67
View File
@@ -33,7 +33,11 @@ spec:
containers:
- image: haproxy:latest
name: haproxy
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.haproxy.resourcesPreset .Values.haproxy.resources $) | nindent 10 }}
{{- if .Values.haproxy.resources }}
resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
{{- else if ne .Values.haproxy.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
{{- end }}
ports:
- containerPort: 8080
name: http
View File
@@ -52,7 +52,11 @@ spec:
shareProcessNamespace: true
containers:
- name: nginx
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list $.Values.nginx.resourcesPreset $.Values.nginx.resources $) | nindent 10 }}
{{- if $.Values.nginx.resources }}
resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
{{- else if ne $.Values.nginx.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
{{- end }}
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
readinessProbe:
httpGet:
View File
@@ -1,39 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}-haproxy
spec:
replicas: {{ .Values.haproxy.replicas }}
minReplicas: 1
kind: http-cache
type: http-cache
selector:
app: {{ $.Release.Name }}-haproxy
version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}-nginx
spec:
replicas: {{ .Values.nginx.replicas }}
minReplicas: 1
kind: http-cache
type: http-cache
selector:
app: {{ $.Release.Name }}-nginx-cache
version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}
spec:
replicas: {{ .Values.replicas }}
minReplicas: 1
kind: http-cache
type: http-cache
selector:
app.kubernetes.io/instance: {{ $.Release.Name }}
version: {{ $.Chart.Version }}
View File
@@ -1,85 +1,67 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"endpoints": {
"default": [],
"description": "Endpoints configuration",
"items": {},
"type": "array"
},
"external": {
"default": false,
"type": "boolean",
"description": "Enable external access from outside the cluster",
"type": "boolean"
},
"haproxy": {
"properties": {
"replicas": {
"default": 2,
"description": "Number of HAProxy replicas",
"type": "number"
},
"resources": {
"default": {},
"description": "Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object"
},
"resourcesPreset": {
"default": "nano",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.",
"type": "string",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
},
"type": "object"
},
"nginx": {
"properties": {
"replicas": {
"default": 2,
"description": "Number of Nginx replicas",
"type": "number"
},
"resources": {
"default": {},
"description": "Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object"
},
"resourcesPreset": {
"default": "nano",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.",
"type": "string",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
},
"type": "object"
"default": false
},
"size": {
"default": "10Gi",
"type": "string",
"description": "Persistent Volume size",
"type": "string"
"default": "10Gi"
},
"storageClass": {
"default": "",
"type": "string",
"description": "StorageClass used to store the data",
"type": "string"
"default": ""
},
"haproxy": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of HAProxy replicas",
"default": 2
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
},
"nginx": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of Nginx replicas",
"default": 2
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
},
"endpoints": {
"type": "array",
"description": "Endpoints configuration",
"default": [],
"items": {}
}
},
"title": "Chart Values",
"type": "object"
}
}
}
View File
@@ -1,12 +1,45 @@
## @section Common parameters
##
## @param size Persistent Volume size
size: 10Gi
## @param storageClass StorageClass used to store the data
storageClass: ""
## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param storageClass StorageClass used to store the data
## @param haproxy.replicas Number of HAProxy replicas
## @param nginx.replicas Number of Nginx replicas
##
external: false
## @section Application-specific parameters
size: 10Gi
storageClass: ""
haproxy:
replicas: 2
## @param haproxy.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param haproxy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
nginx:
replicas: 2
## @param nginx.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
## @section Configuration parameters
## @param endpoints Endpoints configuration
## Example:
@@ -19,29 +52,3 @@ external: false
## - 10.100.3.13:80
##
endpoints: []
## @section HAProxy parameters
haproxy:
## @param haproxy.replicas Number of HAProxy replicas
replicas: 2
## @param haproxy.resources Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param haproxy.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"
## @section Nginx parameters
nginx:
## @param nginx.replicas Number of Nginx replicas
replicas: 2
## @param nginx.resources Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param nginx.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"
View File
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.8.1
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
View File
@@ -1,7 +1,4 @@
include ../../../scripts/package.mk
PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]
generate:
readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.kafka.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
yq -i -o json --indent 4 '.properties.zookeeper.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
readme-generator -v values.yaml -s values.schema.json -r README.md
View File
@@ -4,77 +4,22 @@
### Common parameters
| Name | Description | Value |
| ---------- | ----------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| Name | Description | Value |
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
| `kafka.resources` | Resources | `{}` |
| `kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `zookeeper.resources` | Resources | `{}` |
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
### Application-specific parameters
### Configuration parameters
| Name | Description | Value |
| -------- | ---------------------------------- | ----- |
| `topics` | Topics configuration (see example) | `[]` |
### Kafka configuration
| Name | Description | Value |
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.resources` | Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `kafka.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
### Zookeeper configuration
| Name | Description | Value |
| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.resources` | Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `zookeeper.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
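For reference, a values sketch matching the defaults above (not a tuned production configuration):
```yaml
kafka:
  replicas: 3
  size: 10Gi
  storageClass: ""
  resourcesPreset: small
zookeeper:
  replicas: 3
  size: 5Gi
  storageClass: ""
  resourcesPreset: small
```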
## Parameter examples and reference
### resources and resourcesPreset
`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.
```yaml
resources:
cpu: 4000m
memory: 4Gi
```
`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.
| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
### topics
```yaml
topics:
- name: Results
partitions: 1
replicas: 3
config:
min.insync.replicas: 2
- name: Orders
config:
cleanup.policy: compact
segment.ms: 3600000
max.compaction.lag.ms: 5400000
min.insync.replicas: 2
partitions: 1
replicas: 3
```
| Name | Description | Value |
| -------- | -------------------- | ----- |
| `topics` | Topics configuration | `[]` |
View File
@@ -1 +0,0 @@
../../../library/cozy-lib
View File
@@ -25,14 +25,3 @@ rules:
- {{ .Release.Name }}
- {{ $.Release.Name }}-zookeeper
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io
View File
@@ -8,7 +8,11 @@ metadata:
spec:
kafka:
replicas: {{ .Values.kafka.replicas }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.kafka.resourcesPreset .Values.kafka.resources $) | nindent 6 }}
{{- if .Values.kafka.resources }}
resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
{{- else if ne .Values.kafka.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
listeners:
- name: plain
port: 9092
@@ -66,7 +70,11 @@ spec:
key: kafka-metrics-config.yml
zookeeper:
replicas: {{ .Values.zookeeper.replicas }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.zookeeper.resourcesPreset .Values.zookeeper.resources $) | nindent 6 }}
{{- if .Values.zookeeper.resources }}
resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
{{- else if ne .Values.zookeeper.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
storage:
type: persistent-claim
{{- with .Values.zookeeper.size }}
View File
@@ -1,95 +1,77 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"default": false,
"type": "boolean",
"description": "Enable external access from outside the cluster",
"type": "boolean"
"default": false
},
"kafka": {
"type": "object",
"properties": {
"replicas": {
"default": 3,
"description": "Number of Kafka replicas",
"type": "number"
},
"resources": {
"default": {},
"description": "Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object"
},
"resourcesPreset": {
"default": "small",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.",
"type": "string",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"size": {
"default": "10Gi",
"type": "string",
"description": "Persistent Volume size for Kafka",
"type": "string"
"default": "10Gi"
},
"replicas": {
"type": "number",
"description": "Number of Kafka replicas",
"default": 3
},
"storageClass": {
"default": "",
"type": "string",
"description": "StorageClass used to store the Kafka data",
"type": "string"
"default": ""
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
},
"type": "object"
},
"topics": {
"default": [],
"description": "Topics configuration (see example)",
"items": {},
"type": "array"
}
},
"zookeeper": {
"type": "object",
"properties": {
"replicas": {
"default": 3,
"description": "Number of ZooKeeper replicas",
"type": "number"
},
"resources": {
"default": {},
"description": "Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object"
},
"resourcesPreset": {
"default": "small",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.",
"type": "string",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"size": {
"default": "5Gi",
"type": "string",
"description": "Persistent Volume size for ZooKeeper",
"type": "string"
"default": "5Gi"
},
"replicas": {
"type": "number",
"description": "Number of ZooKeeper replicas",
"default": 3
},
"storageClass": {
"default": "",
"type": "string",
"description": "StorageClass used to store the ZooKeeper data",
"type": "string"
"default": ""
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
},
"type": "object"
}
},
"topics": {
"type": "array",
"description": "Topics configuration",
"default": [],
"items": {}
}
},
"title": "Chart Values",
"type": "object"
}
}
}
View File
@@ -1,12 +1,52 @@
## @section Common parameters
##
## @param external Enable external access from outside the cluster
external: false
## @section Application-specific parameters
## @param kafka.size Persistent Volume size for Kafka
## @param kafka.replicas Number of Kafka replicas
## @param kafka.storageClass StorageClass used to store the Kafka data
## @param zookeeper.size Persistent Volume size for ZooKeeper
## @param zookeeper.replicas Number of ZooKeeper replicas
## @param zookeeper.storageClass StorageClass used to store the ZooKeeper data
##
## @param topics Topics configuration (see example)
external: false
kafka:
size: 10Gi
replicas: 3
storageClass: ""
## @param kafka.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
zookeeper:
size: 5Gi
replicas: 3
storageClass: ""
## @param zookeeper.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
## @section Configuration parameters
## @param topics Topics configuration
## Example:
## topics:
## - name: Results
@@ -24,41 +64,3 @@ external: false
## replicas: 3
##
topics: []
## @section Kafka configuration
##
kafka:
## @param kafka.replicas Number of Kafka replicas
replicas: 3
## @param kafka.resources Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param kafka.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "small"
## @param kafka.size Persistent Volume size for Kafka
size: 10Gi
## @param kafka.storageClass StorageClass used to store the Kafka data
storageClass: ""
## @section Zookeeper configuration
##
zookeeper:
## @param zookeeper.replicas Number of ZooKeeper replicas
replicas: 3
## @param zookeeper.resources Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param zookeeper.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "small"
## @param zookeeper.size Persistent Volume size for ZooKeeper
size: 5Gi
## @param zookeeper.storageClass StorageClass used to store the ZooKeeper data
storageClass: ""


@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.26.0
version: 0.21.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 1.32.6
appVersion: 1.32.4


@@ -1,18 +1,15 @@
KUBERNETES_VERSION = v1.32
KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]
include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
generate:
readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
yq -o=json -i '.properties.version.enum = (load("files/versions.yaml") | keys)' values.schema.json
yq -o json -i '.properties.addons.properties.ingressNginx.properties.exposeMethod.enum = ["Proxied","LoadBalancer"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler
@@ -66,8 +63,6 @@ image-kubevirt-csi-driver:
--load=$(LOAD)
echo "$(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-csi-driver.json -o json -r)" \
> images/kubevirt-csi-driver.tag
IMAGE=$$(cat images/kubevirt-csi-driver.tag) \
yq -i '.csiDriver.image = strenv(IMAGE)' ../../system/kubevirt-csi-node/values.yaml
rm -f images/kubevirt-csi-driver.json


@@ -11,9 +11,6 @@ Tenant clusters are fully separated from the management cluster and are intended
Within a tenant cluster, users can take advantage of LoadBalancer services and easily provision physical volumes as needed.
The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.
Kubernetes version in tenant clusters is independent of Kubernetes in the management cluster.
Users can select the latest patch versions from 1.28 to 1.33.
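For example, pinning the tenant cluster to a minor release is a single values entry (a minimal sketch; per the `files/versions.yaml` shown later in this diff, each minor maps to a specific patch release):

```yaml
# Select the Kubernetes minor version as vMAJOR.MINOR;
# the chart resolves it to a concrete patch release (e.g. v1.32 -> v1.32.6).
version: v1.32
```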
## Why Use a Managed Kubernetes Cluster?
Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration.
@@ -84,79 +81,62 @@ See the reference for components utilized in this service:
### Common Parameters
| Name | Description | Value |
| -------------- | ------------------------------------- | ------------ |
| `storageClass` | StorageClass used to store user data. | `replicated` |
### Application-specific parameters
| Name | Description | Value |
| ------------ | ----------------------------------------------------------------------------------------------------------------- | ------- |
| `version` | Kubernetes version given as vMAJOR.MINOR. Available are versions from 1.28 to 1.33. | `v1.32` |
| `host` | Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty. | `""` |
| `nodeGroups` | Worker nodes configuration (see example) | `{}` |
| Name | Description | Value |
| ----------------------- | ----------------------------------------------------------------------------------------------------------------- | ------------ |
| `host` | Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty. | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components. | `2` |
| `storageClass` | StorageClass used to store user data. | `replicated` |
| `nodeGroups` | nodeGroups configuration | `{}` |
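To make the table concrete, here is a hedged sketch of these values combined; the `nodeGroups` entry is illustrative only, since its field names are not defined anywhere in this diff:

```yaml
host: ""                    # defaults to <cluster-name>.<tenant-host>
storageClass: replicated
controlPlane:
  replicas: 2
nodeGroups:
  md0:                      # group name chosen for illustration
    minReplicas: 1          # hypothetical field, not defined in this diff
    maxReplicas: 3          # hypothetical field, not defined in this diff
    instanceType: u1.medium # see the instanceType reference below
```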
### Cluster Addons
| Name | Description | Value |
| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` |
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` |
| `addons.ingressNginx.exposeMethod` | Method to expose the Ingress-NGINX controller. (allowed values: Proxied, LoadBalancer) | `Proxied` |
| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. Taken into account only when `exposeMethod` is set to `Proxied`. | `[]` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enable FluxCD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
| `addons.velero.enabled` | Enable velero for backup and restore k8s cluster. | `false` |
| `addons.velero.valuesOverride` | Custom values to override | `{}` |
| Name | Description | Value |
| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` |
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. | `[]` |
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enable FluxCD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
### Kubernetes Control Plane Configuration
| Name | Description | Value |
| -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------- |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components. | `2` |
| `controlPlane.apiServer.resources` | Explicit CPU and memory configuration for the API Server. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.apiServer.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `medium` |
| `controlPlane.controllerManager.resources` | Explicit CPU and memory configuration for the Controller Manager. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
| `controlPlane.scheduler.resources` | Explicit CPU and memory configuration for the Scheduler. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.scheduler.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
| `controlPlane.konnectivity.server.resources` | Explicit CPU and memory configuration for Konnectivity. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
| Name | Description | Value |
| -------------------------------------------------- | ---------------------------------------------------------------------------- | ------- |
| `controlPlane.apiServer.resources` | Explicit CPU/memory resource requests and limits for the API server. | `{}` |
| `controlPlane.apiServer.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `small` |
| `controlPlane.controllerManager.resources` | Explicit CPU/memory resource requests and limits for the controller manager. | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
| `controlPlane.scheduler.resources` | Explicit CPU/memory resource requests and limits for the scheduler. | `{}` |
| `controlPlane.scheduler.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
| `controlPlane.konnectivity.server.resources` | Explicit CPU/memory resource requests and limits for the Konnectivity. | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
## Parameter examples and reference
### resources and resourcesPreset
`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.
In production environments, it's recommended to set `resources` explicitly.
Example of `controlPlane.*.resources`:
```yaml
resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi
```
`resourcesPreset` sets named CPU and memory configurations for each replica.
Allowed values for `controlPlane.*.resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
This value is ignored if the corresponding `resources` value is set.
| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
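Combining the two settings, a minimal sketch using parameter paths from the tables above:

```yaml
controlPlane:
  apiServer:
    # Applied only while resources is empty:
    resourcesPreset: medium
    # Once set, explicit resources take precedence over the preset:
    resources:
      limits:
        cpu: "2"
        memory: 2Gi
      requests:
        cpu: 100m
        memory: 512Mi
```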
## Resources Reference
### instanceType Resources
@@ -320,3 +300,4 @@ Specific characteristics of this series are:
workload.
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from
the medium size.


@@ -1 +0,0 @@
../../../library/cozy-lib


@@ -1,6 +0,0 @@
"v1.28": "v1.28.15"
"v1.29": "v1.29.15"
"v1.30": "v1.30.14"
"v1.31": "v1.31.10"
"v1.32": "v1.32.6"
"v1.33": "v1.33.0"


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.26.0@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.21.0@sha256:7315850634728a5864a3de3150c12f0e1454f3f1ce33cdf21a278f57611dd5e9


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.26.0@sha256:71f9afa218693a890f827cb5cda98ba327302bd9f58afde767740557538e07d9
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.21.0@sha256:6962bdf51ab2ff40b420b9cff7c850aeea02187da2a65a67f10e0471744649d7


@@ -8,7 +8,7 @@ ENV GOARCH=$TARGETARCH
RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
&& cd /go/src/kubevirt.io/cloud-provider-kubevirt \
&& git checkout a0acf33
&& git checkout 443a1fe
WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt


@@ -37,7 +37,7 @@ index 74166b5d9..4e744f8de 100644
klog.Infof("Initializing kubevirtEPSController")
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 53388eb8e..b56882c12 100644
index 6f6e3d322..b56882c12 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -54,10 +54,10 @@ type Controller struct {
@@ -286,6 +286,22 @@ index 53388eb8e..b56882c12 100644
for _, eps := range slicesToDelete {
err := c.infraClient.DiscoveryV1().EndpointSlices(eps.Namespace).Delete(context.TODO(), eps.Name, metav1.DeleteOptions{})
if err != nil {
@@ -474,11 +538,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
// Create the desired port configuration
var desiredPorts []discovery.EndpointPort
- for _, port := range service.Spec.Ports {
+ for i := range service.Spec.Ports {
desiredPorts = append(desiredPorts, discovery.EndpointPort{
- Port: &port.TargetPort.IntVal,
- Protocol: &port.Protocol,
- Name: &port.Name,
+ Port: &service.Spec.Ports[i].TargetPort.IntVal,
+ Protocol: &service.Spec.Ports[i].Protocol,
+ Name: &service.Spec.Ports[i].Name,
})
}
@@ -588,55 +652,114 @@ func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
return false
}
@@ -421,10 +437,18 @@ index 53388eb8e..b56882c12 100644
return nil
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1c97035b4..14d92d340 100644
index 1fb86e25f..14d92d340 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -190,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/sets"
dfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"
@@ -189,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
}: "VirtualMachineInstanceList",
})
@@ -433,87 +457,83 @@ index 1c97035b4..14d92d340 100644
err := controller.Init()
if err != nil {
@@ -697,51 +697,43 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
*createPort("http", 80, v1.ProtocolTCP),
[]discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
- // Define several unique ports for the Service
@@ -686,5 +687,229 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
return false, err
}).Should(BeTrue(), "EndpointSlice in infra cluster should be recreated by the controller after deletion")
})
+
+ g.It("Should correctly handle multiple unique ports in EndpointSlice", func() {
+ // Create a VMI in the infra cluster
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+ // Create an EndpointSlice in the tenant cluster
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+ *createPort("http", 80, v1.ProtocolTCP),
+ []discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+
+ // Define multiple ports for the Service
servicePorts := []v1.ServicePort{
{
- Name: "client",
- Protocol: v1.ProtocolTCP,
- Port: 10001,
- TargetPort: intstr.FromInt(30396),
- NodePort: 30396,
- AppProtocol: nil,
+ servicePorts := []v1.ServicePort{
+ {
+ Name: "client",
+ Protocol: v1.ProtocolTCP,
+ Port: 10001,
+ TargetPort: intstr.FromInt(30396),
+ NodePort: 30396,
},
{
- Name: "dashboard",
- Protocol: v1.ProtocolTCP,
- Port: 8265,
- TargetPort: intstr.FromInt(31003),
- NodePort: 31003,
- AppProtocol: nil,
+ },
+ {
+ Name: "dashboard",
+ Protocol: v1.ProtocolTCP,
+ Port: 8265,
+ TargetPort: intstr.FromInt(31003),
+ NodePort: 31003,
},
{
- Name: "metrics",
- Protocol: v1.ProtocolTCP,
- Port: 8080,
- TargetPort: intstr.FromInt(30452),
- NodePort: 30452,
- AppProtocol: nil,
+ },
+ {
+ Name: "metrics",
+ Protocol: v1.ProtocolTCP,
+ Port: 8080,
+ TargetPort: intstr.FromInt(30452),
+ NodePort: 30452,
},
}
- // Create a Service with the first port
createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
- servicePorts[0],
- v1.ServiceExternalTrafficPolicyLocal)
+ },
+ }
+
+ createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
+ servicePorts[0], v1.ServiceExternalTrafficPolicyLocal)
- // Update the Service by adding the remaining ports
svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
Expect(err).To(BeNil())
svc.Spec.Ports = servicePorts
-
_, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
Expect(err).To(BeNil())
var epsListMultiPort *discoveryv1.EndpointSliceList
- // Verify that the EndpointSlice is created with correct unique ports
Eventually(func() (bool, error) {
epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
if len(epsListMultiPort.Items) != 1 {
@@ -758,7 +750,6 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
}
}
- // Verify that all expected ports are present and without duplicates
if len(foundPortNames) != len(expectedPortNames) {
return false, err
}
@@ -769,5 +760,156 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
}).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
})
+
+ svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
+ Expect(err).To(BeNil())
+
+ svc.Spec.Ports = servicePorts
+ _, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
+ Expect(err).To(BeNil())
+
+ var epsListMultiPort *discoveryv1.EndpointSliceList
+
+ Eventually(func() (bool, error) {
+ epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+ if len(epsListMultiPort.Items) != 1 {
+ return false, err
+ }
+
+ createdSlice := epsListMultiPort.Items[0]
+ expectedPortNames := []string{"client", "dashboard", "metrics"}
+ foundPortNames := []string{}
+
+ for _, port := range createdSlice.Ports {
+ if port.Name != nil {
+ foundPortNames = append(foundPortNames, *port.Name)
+ }
+ }
+
+ if len(foundPortNames) != len(expectedPortNames) {
+ return false, err
+ }
+
+ portSet := sets.NewString(foundPortNames...)
+ expectedPortSet := sets.NewString(expectedPortNames...)
+ return portSet.Equal(expectedPortSet), err
+ }).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
+ })
+
+ g.It("Should not panic when Service changes to have a non-nil selector, causing EndpointSlice deletion with no new slices to create", func() {
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,


@@ -1,142 +0,0 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 53388eb8e..28644236f 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -12,7 +12,6 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -669,35 +668,50 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di
for _, slice := range tenantSlices {
for _, endpoint := range slice.Endpoints {
// find all unique nodes that correspond to an endpoint in a tenant slice
+ if endpoint.NodeName == nil {
+ klog.Warningf("Skipping endpoint without NodeName in slice %s/%s", slice.Namespace, slice.Name)
+ continue
+ }
nodeSet.Insert(*endpoint.NodeName)
}
}
- klog.Infof("Desired nodes for service %s in namespace %s: %v", service.Name, service.Namespace, sets.List(nodeSet))
+ klog.Infof("Desired nodes for service %s/%s: %v", service.Namespace, service.Name, sets.List(nodeSet))
for _, node := range sets.List(nodeSet) {
// find vmi for node name
- obj := &unstructured.Unstructured{}
- vmi := &kubevirtv1.VirtualMachineInstance{}
-
- obj, err := c.infraDynamic.Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).Namespace(c.infraNamespace).Get(context.TODO(), node, metav1.GetOptions{})
+ obj, err := c.infraDynamic.
+ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).
+ Namespace(c.infraNamespace).
+ Get(context.TODO(), node, metav1.GetOptions{})
if err != nil {
- klog.Errorf("Failed to get VirtualMachineInstance %s in namespace %s:%v", node, c.infraNamespace, err)
+ klog.Errorf("Failed to get VMI %q in namespace %q: %v", node, c.infraNamespace, err)
continue
}
+ vmi := &kubevirtv1.VirtualMachineInstance{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, vmi)
if err != nil {
klog.Errorf("Failed to convert Unstructured to VirtualMachineInstance: %v", err)
- klog.Fatal(err)
+ continue
}
+ if vmi.Status.NodeName == "" {
+ klog.Warningf("Skipping VMI %s/%s: NodeName is empty", vmi.Namespace, vmi.Name)
+ continue
+ }
+ nodeNamePtr := &vmi.Status.NodeName
+
ready := vmi.Status.Phase == kubevirtv1.Running
serving := vmi.Status.Phase == kubevirtv1.Running
terminating := vmi.Status.Phase == kubevirtv1.Failed || vmi.Status.Phase == kubevirtv1.Succeeded
for _, i := range vmi.Status.Interfaces {
if i.Name == "default" {
+ if i.IP == "" {
+ klog.Warningf("VMI %s/%s interface %q has no IP, skipping", vmi.Namespace, vmi.Name, i.Name)
+ continue
+ }
desiredEndpoints = append(desiredEndpoints, &discovery.Endpoint{
Addresses: []string{i.IP},
Conditions: discovery.EndpointConditions{
@@ -705,9 +719,9 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di
Serving: &serving,
Terminating: &terminating,
},
- NodeName: &vmi.Status.NodeName,
+ NodeName: nodeNamePtr,
})
- continue
+ break
}
}
}
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1c97035b4..d205d0bed 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -771,3 +771,55 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
})
})
+
+var _ = g.Describe("getDesiredEndpoints", func() {
+ g.It("should skip endpoints without NodeName and VMIs without NodeName or IP", func() {
+ // Setup controller
+ ctrl := setupTestKubevirtEPSController().controller
+
+ // Manually inject dynamic client content (1 VMI with missing NodeName)
+ vmi := createUnstructuredVMINode("vmi-without-node", "", "10.0.0.1") // empty NodeName
+ _, err := ctrl.infraDynamic.
+ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).
+ Namespace(infraNamespace).
+ Create(context.TODO(), vmi, metav1.CreateOptions{})
+ Expect(err).To(BeNil())
+
+ // Create service
+ svc := createInfraServiceLB("test-svc", "test-svc", "test-cluster",
+ v1.ServicePort{
+ Name: "http",
+ Port: 80,
+ TargetPort: intstr.FromInt(8080),
+ Protocol: v1.ProtocolTCP,
+ },
+ v1.ServiceExternalTrafficPolicyLocal,
+ )
+
+ // One endpoint has nil NodeName, another is valid
+ nodeName := "vmi-without-node"
+ tenantSlice := &discoveryv1.EndpointSlice{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "slice",
+ Namespace: tenantNamespace,
+ Labels: map[string]string{
+ discoveryv1.LabelServiceName: "test-svc",
+ },
+ },
+ AddressType: discoveryv1.AddressTypeIPv4,
+ Endpoints: []discoveryv1.Endpoint{
+ { // should be skipped due to nil NodeName
+ Addresses: []string{"10.1.1.1"},
+ NodeName: nil,
+ },
+ { // will hit VMI without NodeName and also be skipped
+ Addresses: []string{"10.1.1.2"},
+ NodeName: &nodeName,
+ },
+ },
+ }
+
+ endpoints := ctrl.getDesiredEndpoints(svc, []*discoveryv1.EndpointSlice{tenantSlice})
+ Expect(endpoints).To(HaveLen(0), "Expected no endpoints due to missing NodeName or IP")
+ })
+})


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.26.0@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.21.0@sha256:b1525163cd21938ac934bb1b860f2f3151464fa463b82880ab058167aeaf3e29


@@ -3,7 +3,7 @@ ARG builder_image=docker.io/library/golang:1.22.5
FROM ${builder_image} AS builder
RUN git clone https://github.com/kubevirt/csi-driver /src/kubevirt-csi-driver \
&& cd /src/kubevirt-csi-driver \
&& git checkout a8d6605bc9997bcfda3fb9f1f82ba6445b4984cc
&& git checkout 35836e0c8b68d9916d29a838ea60cdd3fc6199cf
ARG TARGETOS
ARG TARGETARCH
@@ -11,7 +11,6 @@ ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH
WORKDIR /src/kubevirt-csi-driver
RUN make build
FROM quay.io/centos/centos:stream9


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:e53f2394c7aa76ad10818ffb945e40006cd77406999e47e036d41b8b0bf094cc
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:bfe568db4b768a4b6c67a8d562892bbba766d0245e140d431754589b347f0b41


@@ -1,7 +0,0 @@
{{- define "kubernetes.versionMap" }}
{{- $versionMap := .Files.Get "files/versions.yaml" | fromYaml }}
{{- if not (hasKey $versionMap .Values.version) }}
{{- printf `Kubernetes version %s is not supported, allowed versions are %s` $.Values.version (keys $versionMap) | fail }}
{{- end }}
{{- index $versionMap .Values.version }}
{{- end }}

Some files were not shown because too many files have changed in this diff.