Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-02-05 08:17:59 +00:00)

Compare commits: hcloud...bugfix-fix — 1 commit (b1ed061de9)
.github/PULL_REQUEST_TEMPLATE.md (vendored) — 24 lines changed

@@ -1,24 +0,0 @@
-<!-- Thank you for making a contribution! Here are some tips for you:
-- Start the PR title with the [label] of Cozystack component:
-  - For system components: [platform], [system], [linstor], [cilium], [kube-ovn], [dashboard], [cluster-api], etc.
-  - For managed apps: [apps], [tenant], [kubernetes], [postgres], [virtual-machine], etc.
-  - For development and maintenance: [tests], [ci], [docs], [maintenance].
-- If it's a work in progress, consider creating this PR as a draft.
-- Don't hesitate to ask for opinion and review in the community chats, even if it's still a draft.
-- Add the label `backport` if it's a bugfix that needs to be backported to a previous version.
--->
-
-## What this PR does
-
-
-### Release note
-
-<!-- Write a release note:
-- Explain what has changed internally and for users.
-- Start with the same [label] as in the PR title.
-- Follow the guidelines at https://github.com/kubernetes/community/blob/master/contributors/guide/release-notes.md.
--->
-
-```release-note
-[]
-```
.github/workflows/pre-commit.yml (vendored) — 2 lines changed

@@ -2,7 +2,7 @@ name: Pre-Commit Checks
 
 on:
   pull_request:
-    types: [opened, synchronize, reopened]
+    types: [labeled, opened, synchronize, reopened]
 
 concurrency:
   group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
.github/workflows/pull-requests-release.yaml (vendored) — 95 lines changed

@@ -1,17 +1,100 @@
-name: "Releasing PR"
+name: Releasing PR
 
 on:
   pull_request:
-    types: [closed]
-    paths-ignore:
-      - 'docs/**/*'
+    types: [labeled, opened, synchronize, reopened, closed]
 
-# Cancel in‑flight runs for the same PR when a new push arrives.
 concurrency:
-  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true
 
 jobs:
+  verify:
+    name: Test Release
+    runs-on: [self-hosted]
+    permissions:
+      contents: read
+      packages: write
+
+    if: |
+      contains(github.event.pull_request.labels.*.name, 'release') &&
+      github.event.action != 'closed'
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+          registry: ghcr.io
+
+      - name: Extract tag from PR branch
+        id: get_tag
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const branch = context.payload.pull_request.head.ref;
+            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
+            if (!m) {
+              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
+              return;
+            }
+            const tag = `v${m[1]}`;
+            core.setOutput('tag', tag);
+
+      - name: Find draft release and get asset IDs
+        id: fetch_assets
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GH_PAT }}
+          script: |
+            const tag = '${{ steps.get_tag.outputs.tag }}';
+            const releases = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              per_page: 100
+            });
+            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
+            if (!draft) {
+              core.setFailed(`Draft release '${tag}' not found`);
+              return;
+            }
+            const findAssetId = (name) =>
+              draft.assets.find(a => a.name === name)?.id;
+            const installerId = findAssetId("cozystack-installer.yaml");
+            const diskId = findAssetId("nocloud-amd64.raw.xz");
+            if (!installerId || !diskId) {
+              core.setFailed("Missing required assets");
+              return;
+            }
+            core.setOutput("installer_id", installerId);
+            core.setOutput("disk_id", diskId);
+
+      - name: Download assets from GitHub API
+        run: |
+          mkdir -p _out/assets
+          curl -sSL \
+            -H "Authorization: token ${GH_PAT}" \
+            -H "Accept: application/octet-stream" \
+            -o _out/assets/cozystack-installer.yaml \
+            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.installer_id }}"
+          curl -sSL \
+            -H "Authorization: token ${GH_PAT}" \
+            -H "Accept: application/octet-stream" \
+            -o _out/assets/nocloud-amd64.raw.xz \
+            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.disk_id }}"
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
+
+      - name: Run tests
+        run: make test
+
   finalize:
     name: Finalize Release
     runs-on: [self-hosted]
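The new `verify` job resolves a draft release by tag and then downloads two assets by their numeric asset IDs through the GitHub REST API. A minimal local sketch of the same flow in plain bash can help when debugging it outside of Actions; the repository name, tag, and `GH_PAT` environment variable below are illustrative assumptions, while the two API endpoints are the same ones the workflow calls.

```bash
#!/usr/bin/env bash
# Hedged sketch: locate a draft release and fetch one of its assets locally.
# Assumes GH_PAT holds a token with access to drafts; REPO and TAG are examples.
set -euo pipefail

REPO="cozystack/cozystack"   # assumption: adjust to the repository being tested
TAG="v0.32.0"                # assumption: tag derived from a release-X.Y.Z branch

# List releases (drafts are visible to a token with sufficient access)
# and pick the installer asset ID from the draft that matches our tag.
releases=$(curl -sSL -H "Authorization: token ${GH_PAT}" \
  "https://api.github.com/repos/${REPO}/releases?per_page=100")
installer_id=$(echo "$releases" | jq -r \
  ".[] | select(.tag_name==\"${TAG}\" and .draft) | .assets[] | select(.name==\"cozystack-installer.yaml\") | .id")

# Downloading by asset ID with an octet-stream Accept header returns the raw content.
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
  -o cozystack-installer.yaml \
  "https://api.github.com/repos/${REPO}/releases/assets/${installer_id}"
```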
.github/workflows/pull-requests.yaml (vendored) — 306 lines changed

@@ -2,13 +2,10 @@ name: Pull Request
 
 on:
   pull_request:
-    types: [opened, synchronize, reopened]
-    paths-ignore:
-      - 'docs/**/*'
+    types: [labeled, opened, synchronize, reopened]
 
-# Cancel in‑flight runs for the same PR when a new push arrives.
 concurrency:
-  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true
 
 jobs:
@@ -47,17 +44,6 @@ jobs:
       - name: Build Talos image
         run: make -C packages/core/installer talos-nocloud
 
-      - name: Save git diff as patch
-        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
-        run: git diff HEAD > _out/assets/pr.patch
-
-      - name: Upload git diff patch
-        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
-        uses: actions/upload-artifact@v4
-        with:
-          name: pr-patch
-          path: _out/assets/pr.patch
-
       - name: Upload installer
         uses: actions/upload-artifact@v4
         with:
@@ -70,267 +56,92 @@ jobs:
           name: talos-image
           path: _out/assets/nocloud-amd64.raw.xz
 
-  resolve_assets:
-    name: "Resolve assets"
-    runs-on: ubuntu-latest
-    if: contains(github.event.pull_request.labels.*.name, 'release')
-    outputs:
-      installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
-      disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
+  prepare_env:
+    name: Prepare environment
+    runs-on: [self-hosted]
+    needs: build
+    # Never run when the PR carries the "release" label.
+    if: |
+      !contains(github.event.pull_request.labels.*.name, 'release')
 
     steps:
       - name: Checkout code
-        if: contains(github.event.pull_request.labels.*.name, 'release')
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
           fetch-tags: true
 
-      - name: Extract tag from PR branch (release PR)
-        if: contains(github.event.pull_request.labels.*.name, 'release')
-        id: get_tag
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const branch = context.payload.pull_request.head.ref;
-            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
-            if (!m) {
-              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
-              return;
-            }
-            core.setOutput('tag', `v${m[1]}`);
-
-      - name: Find draft release & asset IDs (release PR)
-        if: contains(github.event.pull_request.labels.*.name, 'release')
-        id: fetch_assets
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GH_PAT }}
-          script: |
-            const tag = '${{ steps.get_tag.outputs.tag }}';
-            const releases = await github.rest.repos.listReleases({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              per_page: 100
-            });
-            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
-            if (!draft) {
-              core.setFailed(`Draft release '${tag}' not found`);
-              return;
-            }
-            const find = (n) => draft.assets.find(a => a.name === n)?.id;
-            const installerId = find('cozystack-installer.yaml');
-            const diskId = find('nocloud-amd64.raw.xz');
-            if (!installerId || !diskId) {
-              core.setFailed('Required assets missing in draft release');
-              return;
-            }
-            core.setOutput('installer_id', installerId);
-            core.setOutput('disk_id', diskId);
-
-
-  prepare_env:
-    name: "Prepare environment"
-    runs-on: [self-hosted]
-    permissions:
-      contents: read
-      packages: read
-    needs: ["build", "resolve_assets"]
-    if: ${{ always() && (needs.build.result == 'success' || needs.resolve_assets.result == 'success') }}
-
-    steps:
-      # ▸ Checkout and prepare the codebase
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      # ▸ Regular PR path – download artefacts produced by the *build* job
-      - name: "Download Talos image (regular PR)"
-        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
-        uses: actions/download-artifact@v4
-        with:
-          name: talos-image
-          path: _out/assets
-
-      - name: Download PR patch
-        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
-        uses: actions/download-artifact@v4
-        with:
-          name: pr-patch
-          path: _out/assets
-
-      - name: Apply patch
-        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
-        run: |
-          git apply _out/assets/pr.patch
-
-      # ▸ Release PR path – fetch artefacts from the corresponding draft release
-      - name: Download assets from draft release (release PR)
-        if: contains(github.event.pull_request.labels.*.name, 'release')
-        run: |
-          mkdir -p _out/assets
-          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-            -o _out/assets/nocloud-amd64.raw.xz \
-            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
-        env:
-          GH_PAT: ${{ secrets.GH_PAT }}
-
-      - name: Set sandbox ID
-        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
-
-      # ▸ Start actual job steps
-      - name: Prepare workspace
-        run: |
-          rm -rf /tmp/$SANDBOX_NAME
-          cp -r ${{ github.workspace }} /tmp/$SANDBOX_NAME
-
-      - name: Prepare environment
-        run: |
-          cd /tmp/$SANDBOX_NAME
-          attempt=0
-          until make SANDBOX_NAME=$SANDBOX_NAME prepare-env; do
-            attempt=$((attempt + 1))
-            if [ $attempt -ge 3 ]; then
-              echo "❌ Attempt $attempt failed, exiting..."
-              exit 1
-            fi
-            echo "❌ Attempt $attempt failed, retrying..."
-          done
-          echo "✅ The task completed successfully after $attempt attempts"
-
-  install_cozystack:
-    name: "Install Cozystack"
-    runs-on: [self-hosted]
-    permissions:
-      contents: read
-      packages: read
-    needs: ["prepare_env", "resolve_assets"]
-    if: ${{ always() && needs.prepare_env.result == 'success' }}
-
-    steps:
-      - name: Prepare _out/assets directory
-        run: mkdir -p _out/assets
-
-      # ▸ Regular PR path – download artefacts produced by the *build* job
-      - name: "Download installer (regular PR)"
-        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+      - name: Download installer
         uses: actions/download-artifact@v4
         with:
           name: cozystack-installer
-          path: _out/assets
+          path: _out/assets/
 
-      # ▸ Release PR path – fetch artefacts from the corresponding draft release
-      - name: Download assets from draft release (release PR)
-        if: contains(github.event.pull_request.labels.*.name, 'release')
-        run: |
-          mkdir -p _out/assets
-          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-            -o _out/assets/cozystack-installer.yaml \
-            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
-        env:
-          GH_PAT: ${{ secrets.GH_PAT }}
+      - name: Download Talos image
+        uses: actions/download-artifact@v4
+        with:
+          name: talos-image
+          path: _out/assets/
 
-      # ▸ Start actual job steps
       - name: Set sandbox ID
         run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
 
-      - name: Sync _out/assets directory
-        run: |
-          mkdir -p /tmp/$SANDBOX_NAME/_out/assets
-          mv _out/assets/* /tmp/$SANDBOX_NAME/_out/assets/
+      - name: Prepare environment
+        run: make SANDBOX_NAME=$SANDBOX_NAME prepare-env
 
-      - name: Install Cozystack into sandbox
-        run: |
-          cd /tmp/$SANDBOX_NAME
-          attempt=0
-          until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack; do
-            attempt=$((attempt + 1))
-            if [ $attempt -ge 3 ]; then
-              echo "❌ Attempt $attempt failed, exiting..."
-              exit 1
-            fi
-            echo "❌ Attempt $attempt failed, retrying..."
-          done
-          echo "✅ The task completed successfully after $attempt attempts."
+  install_cozystack:
+    name: Install Cozystack
+    runs-on: [self-hosted]
+    needs: prepare_env
 
-  detect_test_matrix:
-    name: "Detect e2e test matrix"
-    runs-on: ubuntu-latest
-    outputs:
-      matrix: ${{ steps.set.outputs.matrix }}
+    # Never run when the PR carries the "release" label.
+    if: |
+      !contains(github.event.pull_request.labels.*.name, 'release')
 
     steps:
-      - uses: actions/checkout@v4
-      - id: set
-        run: |
-          apps=$(find hack/e2e-apps -maxdepth 1 -mindepth 1 -name '*.bats' | \
-            awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
-          echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Install Cozystack
+        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack
 
   test_apps:
-    strategy:
-      matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
-    name: Test ${{ matrix.app }}
+    name: Test applications
     runs-on: [self-hosted]
-    needs: [install_cozystack,detect_test_matrix]
-    if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}
+    needs: install_cozystack
+
+    # Never run when the PR carries the "release" label.
+    if: |
+      !contains(github.event.pull_request.labels.*.name, 'release')
 
     steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
       - name: Set sandbox ID
         run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
 
       - name: E2E Apps
-        run: |
-          cd /tmp/$SANDBOX_NAME
-          attempt=0
-          until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}; do
-            attempt=$((attempt + 1))
-            if [ $attempt -ge 3 ]; then
-              echo "❌ Attempt $attempt failed, exiting..."
-              exit 1
-            fi
-            echo "❌ Attempt $attempt failed, retrying..."
-          done
-          echo "✅ The task completed successfully after $attempt attempts"
-
-  collect_debug_information:
-    name: Collect debug information
-    runs-on: [self-hosted]
-    needs: [test_apps]
-    if: ${{ always() }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      - name: Set sandbox ID
-        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
-
-      - name: Collect report
-        run: |
-          cd /tmp/$SANDBOX_NAME
-          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report
-
-      - name: Upload cozyreport.tgz
-        uses: actions/upload-artifact@v4
-        with:
-          name: cozyreport
-          path: /tmp/${{ env.SANDBOX_NAME }}/_out/cozyreport.tgz
-
-      - name: Collect images list
-        run: |
-          cd /tmp/$SANDBOX_NAME
-          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images
-
-      - name: Upload image list
-        uses: actions/upload-artifact@v4
-        with:
-          name: image-list
-          path: /tmp/${{ env.SANDBOX_NAME }}/_out/images.txt
+        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps
 
   cleanup:
     name: Tear down environment
     runs-on: [self-hosted]
-    needs: [collect_debug_information]
-    if: ${{ always() && needs.test_apps.result == 'success' }}
+    needs: test_apps
+
+    # Never run when the PR carries the "release" label.
+    if: |
+      !contains(github.event.pull_request.labels.*.name, 'release')
 
     steps:
       - name: Checkout code
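Several of the steps removed in the hunk above (prepare-env, install-cozystack, test-apps) shared the same retry idiom: re-run a make target until it succeeds, giving up after three attempts. A generic version of that loop, as a hedged bash sketch — the `retry` wrapper is illustrative, while the `prepare-env` target and the messages come from the removed steps themselves:

```bash
#!/usr/bin/env bash
# Hedged sketch of the retry idiom used by the removed workflow steps:
# run a command until it succeeds, failing after three attempts.
retry() {
  attempt=0
  until "$@"; do
    attempt=$((attempt + 1))
    if [ "$attempt" -ge 3 ]; then
      echo "❌ Attempt $attempt failed, exiting..."
      return 1
    fi
    echo "❌ Attempt $attempt failed, retrying..."
  done
  echo "✅ The task completed successfully after $attempt attempts"
}

# Example invocation, mirroring the removed "Prepare environment" step:
retry make SANDBOX_NAME="$SANDBOX_NAME" prepare-env
```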
@@ -342,10 +153,5 @@ jobs:
       - name: Set sandbox ID
         run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
 
-      - name: Tear down sandbox
+      - name: E2E Apps
         run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete
-
-      - name: Remove workspace
-        run: rm -rf /tmp/$SANDBOX_NAME
-
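The "Set sandbox ID" step repeated throughout this workflow derives one stable sandbox name per repository/workflow/ref combination by hashing the triplet and keeping the first ten hex characters. A hedged standalone sketch of the same derivation — the three variable values are illustrative only, the command itself is taken verbatim from the workflow:

```bash
#!/usr/bin/env bash
# Hedged sketch of the sandbox-ID derivation; example values only.
GITHUB_REPOSITORY="cozystack/cozystack"
GITHUB_WORKFLOW="Pull Request"
GITHUB_REF="refs/pull/1234/merge"

# Same pipeline as the workflow step: hash the triplet, keep 10 hex chars.
SANDBOX_NAME="cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" \
  | sha256sum | cut -c1-10)"
echo "$SANDBOX_NAME"   # e.g. cozy-e2e-sandbox-<10 hex characters>
```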
.github/workflows/tags.yaml (vendored) — 13 lines changed

@@ -112,12 +112,9 @@ jobs:
       # Commit built artifacts
       - name: Commit release artifacts
         if: steps.check_release.outputs.skip == 'false'
-        env:
-          GH_PAT: ${{ secrets.GH_PAT }}
         run: |
-          git config user.name "cozystack-bot"
-          git config user.email "217169706+cozystack-bot@users.noreply.github.com"
-          git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
+          git config user.name "github-actions"
+          git config user.email "github-actions@github.com"
           git add .
           git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
           git push origin HEAD || true
@@ -192,12 +189,7 @@ jobs:
       # Create release-X.Y.Z branch and push (force-update)
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
-        env:
-          GH_PAT: ${{ secrets.GH_PAT }}
        run: |
-          git config user.name "cozystack-bot"
-          git config user.email "217169706+cozystack-bot@users.noreply.github.com"
-          git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
          BRANCH="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH"
          git push -f origin "$BRANCH"
@@ -207,7 +199,6 @@ jobs:
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
-          github-token: ${{ secrets.GH_PAT }}
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
Makefile — 1 line changed

@@ -9,6 +9,7 @@ build-deps:
 
 build: build-deps
 	make -C packages/apps/http-cache image
+	make -C packages/apps/postgres image
 	make -C packages/apps/mysql image
 	make -C packages/apps/clickhouse image
 	make -C packages/apps/kubernetes image
Removed file — Cozystack v0.32.0 release notes:

@@ -1,71 +0,0 @@
-Cozystack v0.32.0 is a significant release that brings new features, key fixes, and updates to underlying components.
-
-## Major Features and Improvements
-
-* [platform] Use `cozypkg` instead of Helm (@kvaps in https://github.com/cozystack/cozystack/pull/1057)
-* [platform] Introduce the HelmRelease reconciler for system components. (@kvaps in https://github.com/cozystack/cozystack/pull/1033)
-* [kubernetes] Enable using container registry mirrors by tenant Kubernetes clusters. Configure containerd for tenant Kubernetes clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/979, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/1032)
-* [platform] Allow users to specify CPU requests in VCPUs. Use a library chart for resource management. (@lllamnyp in https://github.com/cozystack/cozystack/pull/972 and https://github.com/cozystack/cozystack/pull/1025)
-* [platform] Annotate all child objects of apps with uniform labels for tracking by WorkloadMonitors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1018 and https://github.com/cozystack/cozystack/pull/1024)
-* [platform] Introduce `cluster-domain` option and un-hardcode `cozy.local`. (@kvaps in https://github.com/cozystack/cozystack/pull/1039)
-* [platform] Get instance type when reconciling WorkloadMonitor (https://github.com/cozystack/cozystack/pull/1030)
-* [virtual-machine] Add RBAC rules to allow port forwarding in KubeVirt for SSH via `virtctl`. (@mattia-eleuteri in https://github.com/cozystack/cozystack/pull/1027, patched by @klinch0 in https://github.com/cozystack/cozystack/pull/1028)
-* [monitoring] Add events and audit inputs (@kevin880202 in https://github.com/cozystack/cozystack/pull/948)
-
-## Security
-
-* Resolve a security problem that allowed tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062)
-
-## Fixes
-
-* [dashboard] Fix a number of issues in the Cozystack Dashboard (@kvaps in https://github.com/cozystack/cozystack/pull/1042)
-* [kafka] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040)
-* [cilium] Fixed Gateway API manifest. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/1016)
-* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031)
-* [platform] Fix dependencies for paas-hosted bundle. (@kvaps in https://github.com/cozystack/cozystack/pull/1034)
-* [platform] Reduce system resource consumption by using lesser resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054)
-* [virtual-machine] Fix handling of cloudinit and ssh-key input for `virtual-machine` and `vm-instance` applications. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1019 and https://github.com/cozystack/cozystack/pull/1020)
-* [apps] Fix Clickhouse version parsing. (@kvaps in https://github.com/cozystack/cozystack/commit/28302e776e9d2bb8f424cf467619fa61d71ac49a)
-* [apps] Add resource quotas for PostgreSQL jobs and fix application readme generation check in CI. (@klinch0 in https://github.com/cozystack/cozystack/pull/1051)
-* [kube-ovn] Enable database health check. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
-* [kubernetes] Fix upstream issue by updating Kubevirt-CCM. (@kvaps in https://github.com/cozystack/cozystack/pull/1052)
-* [kubernetes] Fix resources and introduce a migration when upgrading tenant Kubernetes to v0.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1073)
-* [cluster-api] Add a missing migration for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1072)
-
-## Dependencies
-
-* Introduce cozypkg, update to v1.1.0. (@kvaps in https://github.com/cozystack/cozystack/pull/1057 and https://github.com/cozystack/cozystack/pull/1063)
-* Update flux-operator to 0.22.0, Flux to 2.6.x. (@kingdonb in https://github.com/cozystack/cozystack/pull/1035)
-* Update Talos Linux to v1.10.3. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
-* Update Cilium to v1.17.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1046)
-* Update MetalLB to v0.15.2. (@kvaps in https://github.com/cozystack/cozystack/pull/1045)
-* Update Kube-OVN to v1.13.13. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
-
-## Documentation
-
-* [Oracle Cloud Infrastructure installation guide](https://cozystack.io/docs/operations/talos/installation/oracle-cloud/). (@kvaps, @lllamnyp, and @NickVolynkin in https://github.com/cozystack/website/pull/168)
-* [Cluster configuration with `talosctl`](https://cozystack.io/docs/operations/talos/configuration/talosctl/). (@NickVolynkin in https://github.com/cozystack/website/pull/211)
-* [Configuring container registry mirrors for tenant Kubernetes clusters](https://cozystack.io/docs/operations/talos/configuration/air-gapped/#5-configure-container-registry-mirrors-for-tenant-kubernetes). (@klinch0 in https://github.com/cozystack/website/pull/210)
-* [Explain application management strategies and available versions for managed applications](https://cozystack.io/docs/guides/applications/). (@NickVolynkin in https://github.com/cozystack/website/pull/219)
-* [How to clean up etcd state](https://cozystack.io/docs/operations/faq/#how-to-clean-up-etcd-state). (@gwynbleidd2106 in https://github.com/cozystack/website/pull/214)
-* [State that Cozystack is a CNCF Sandbox project](https://github.com/cozystack/cozystack?tab=readme-ov-file#cozystack). (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1055)
-
-## Development, Testing, and CI/CD
-
-* [tests] Add tests for applications `virtual-machine`, `vm-disk`, `vm-instance`, `postgresql`, `mysql`, and `clickhouse`. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1048, patched by @kvaps in https://github.com/cozystack/cozystack/pull/1074)
-* [tests] Fix concurrency for the `docker login` action. (@kvaps in https://github.com/cozystack/cozystack/pull/1014)
-* [tests] Increase QEMU system disk size in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1011)
-* [tests] Increase the waiting timeout for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1038)
-* [ci] Separate build and testing jobs in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1005 and https://github.com/cozystack/cozystack/pull/1010)
-* [ci] Fix the release assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006 and https://github.com/cozystack/cozystack/pull/1009)
-
-## New Contributors
-
-* @kevin880202 made their first contribution in https://github.com/cozystack/cozystack/pull/948
-* @mattia-eleuteri made their first contribution in https://github.com/cozystack/cozystack/pull/1027
-
-**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.32.0
-
-<!--
-HEAD https://github.com/cozystack/cozystack/commit/3ce6dbe8
--->
Removed file — helper script that creates a CDI DataVolume image source:

@@ -1,32 +0,0 @@
-#!/bin/bash
-
-set -e
-
-name="$1"
-url="$2"
-
-if [ -z "$name" ] || [ -z "$url" ]; then
-  echo "Usage: <name> <url>"
-  echo "Example: 'ubuntu' 'https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img'"
-  exit 1
-fi
-
-#### create DV ubuntu source for CDI image cloning
-kubectl create -f - <<EOF
-apiVersion: cdi.kubevirt.io/v1beta1
-kind: DataVolume
-metadata:
-  name: "vm-image-$name"
-  namespace: cozy-public
-  annotations:
-    cdi.kubevirt.io/storage.bind.immediate.requested: "true"
-spec:
-  source:
-    http:
-      url: "$url"
-  storage:
-    resources:
-      requests:
-        storage: 5Gi
-    storageClassName: replicated
-EOF
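The removed helper above creates a DataVolume named `vm-image-<name>` in the `cozy-public` namespace and lets CDI import the image over HTTP. A hedged sketch of checking on such an import after running the helper with the name `ubuntu` — the `ubuntu` name and the 10-minute timeout are illustrative assumptions:

```bash
# Hedged sketch: watch the import of the DataVolume created by the removed helper.
# Assumes the helper was invoked with name "ubuntu"; CDI CRDs must be installed.
kubectl -n cozy-public get dv vm-image-ubuntu
kubectl -n cozy-public wait dv vm-image-ubuntu --timeout=10m --for=condition=Ready
```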
Removed file — script collecting Talos image lists from the test nodes:

@@ -1,8 +0,0 @@
-#!/bin/sh
-
-for node in 11 12 13; do
-  talosctl -n 192.168.123.${node} -e 192.168.123.${node} images ls >> images.tmp
-  talosctl -n 192.168.123.${node} -e 192.168.123.${node} images --namespace system ls >> images.tmp
-done
-
-while read _ name sha _ ; do echo $sha $name ; done < images.tmp | sort -u > images.txt
Removed file — cluster report collection script:

@@ -1,147 +0,0 @@
-#!/bin/sh
-REPORT_DATE=$(date +%Y-%m-%d_%H-%M-%S)
-REPORT_NAME=${1:-cozyreport-$REPORT_DATE}
-REPORT_PDIR=$(mktemp -d)
-REPORT_DIR=$REPORT_PDIR/$REPORT_NAME
-
-# -- check dependencies
-command -V kubectl >/dev/null || exit $?
-command -V tar >/dev/null || exit $?
-
-# -- cozystack module
-
-echo "Collecting Cozystack information..."
-mkdir -p $REPORT_DIR/cozystack
-kubectl get deploy -n cozy-system cozystack -o jsonpath='{.spec.template.spec.containers[0].image}' > $REPORT_DIR/cozystack/image.txt 2>&1
-kubectl get cm -n cozy-system --no-headers | awk '$1 ~ /^cozystack/' |
-  while read NAME _; do
-    DIR=$REPORT_DIR/cozystack/configs
-    mkdir -p $DIR
-    kubectl get cm -n cozy-system $NAME -o yaml > $DIR/$NAME.yaml 2>&1
-  done
-
-# -- kubernetes module
-
-echo "Collecting Kubernetes information..."
-mkdir -p $REPORT_DIR/kubernetes
-kubectl version > $REPORT_DIR/kubernetes/version.txt 2>&1
-
-echo "Collecting nodes..."
-kubectl get nodes -o wide > $REPORT_DIR/kubernetes/nodes.txt 2>&1
-kubectl get nodes --no-headers | awk '$2 != "Ready"' |
-  while read NAME _; do
-    DIR=$REPORT_DIR/kubernetes/nodes/$NAME
-    mkdir -p $DIR
-    kubectl get node $NAME -o yaml > $DIR/node.yaml 2>&1
-    kubectl describe node $NAME > $DIR/describe.txt 2>&1
-  done
-
-echo "Collecting namespaces..."
-kubectl get ns -o wide > $REPORT_DIR/kubernetes/namespaces.txt 2>&1
-kubectl get ns --no-headers | awk '$2 != "Active"' |
-  while read NAME _; do
-    DIR=$REPORT_DIR/kubernetes/namespaces/$NAME
-    mkdir -p $DIR
-    kubectl get ns $NAME -o yaml > $DIR/namespace.yaml 2>&1
-    kubectl describe ns $NAME > $DIR/describe.txt 2>&1
-  done
-
-echo "Collecting helmreleases..."
-kubectl get hr -A > $REPORT_DIR/kubernetes/helmreleases.txt 2>&1
-kubectl get hr -A | awk '$4 != "True"' | \
-  while read NAMESPACE NAME _; do
-    DIR=$REPORT_DIR/kubernetes/helmreleases/$NAMESPACE/$NAME
-    mkdir -p $DIR
-    kubectl get hr -n $NAMESPACE $NAME -o yaml > $DIR/hr.yaml 2>&1
-    kubectl describe hr -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
-  done
-
-echo "Collecting pods..."
-kubectl get pod -A -o wide > $REPORT_DIR/kubernetes/pods.txt 2>&1
-kubectl get pod -A --no-headers | awk '$4 !~ /Running|Succeeded|Completed/' |
-  while read NAMESPACE NAME _ STATE _; do
-    DIR=$REPORT_DIR/kubernetes/pods/$NAMESPACE/$NAME
-    mkdir -p $DIR
-    CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name}' -n $NAMESPACE $NAME)
-    kubectl get pod -n $NAMESPACE $NAME -o yaml > $DIR/pod.yaml 2>&1
-    kubectl describe pod -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
-    if [ "$STATE" != "Pending" ]; then
-      for CONTAINER in $CONTAINERS; do
-        kubectl logs -n $NAMESPACE $NAME $CONTAINER > $DIR/logs-$CONTAINER.txt 2>&1
-        kubectl logs -n $NAMESPACE $NAME $CONTAINER --previous > $DIR/logs-$CONTAINER-previous.txt 2>&1
-      done
-    fi
-  done
-
-echo "Collecting virtualmachines..."
-kubectl get vm -A > $REPORT_DIR/kubernetes/vms.txt 2>&1
-kubectl get vm -A --no-headers | awk '$5 != "True"' |
-  while read NAMESPACE NAME _; do
-    DIR=$REPORT_DIR/kubernetes/vm/$NAMESPACE/$NAME
-    mkdir -p $DIR
-    kubectl get vm -n $NAMESPACE $NAME -o yaml > $DIR/vm.yaml 2>&1
-    kubectl describe vm -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
-  done
-
-echo "Collecting virtualmachine instances..."
-kubectl get vmi -A > $REPORT_DIR/kubernetes/vmis.txt 2>&1
-kubectl get vmi -A --no-headers | awk '$4 != "Running"' |
-  while read NAMESPACE NAME _; do
-    DIR=$REPORT_DIR/kubernetes/vmi/$NAMESPACE/$NAME
-    mkdir -p $DIR
-    kubectl get vmi -n $NAMESPACE $NAME -o yaml > $DIR/vmi.yaml 2>&1
-    kubectl describe vmi -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
-  done
-
-echo "Collecting services..."
-kubectl get svc -A > $REPORT_DIR/kubernetes/services.txt 2>&1
-kubectl get svc -A --no-headers | awk '$4 == "<pending>"' |
-  while read NAMESPACE NAME _; do
-    DIR=$REPORT_DIR/kubernetes/services/$NAMESPACE/$NAME
-    mkdir -p $DIR
-    kubectl get svc -n $NAMESPACE $NAME -o yaml > $DIR/service.yaml 2>&1
-    kubectl describe svc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
-  done
-
-echo "Collecting pvcs..."
-kubectl get pvc -A > $REPORT_DIR/kubernetes/pvcs.txt 2>&1
-kubectl get pvc -A | awk '$3 != "Bound"' |
-  while read NAMESPACE NAME _; do
-    DIR=$REPORT_DIR/kubernetes/pvc/$NAMESPACE/$NAME
-    mkdir -p $DIR
-    kubectl get pvc -n $NAMESPACE $NAME -o yaml > $DIR/pvc.yaml 2>&1
-    kubectl describe pvc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
-  done
-
-# -- kamaji module
-
-if kubectl get deploy -n cozy-linstor linstor-controller >/dev/null 2>&1; then
-  echo "Collecting kamaji resources..."
-  DIR=$REPORT_DIR/kamaji
-  mkdir -p $DIR
-  kubectl logs -n cozy-kamaji deployment/kamaji > $DIR/kamaji-controller.log 2>&1
-  kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A > $DIR/kamajicontrolplanes.txt 2>&1
-  kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A -o yaml > $DIR/kamajicontrolplanes.yaml 2>&1
-  kubectl get tenantcontrolplanes.kamaji.clastix.io -A > $DIR/tenantcontrolplanes.txt 2>&1
-  kubectl get tenantcontrolplanes.kamaji.clastix.io -A -o yaml > $DIR/tenantcontrolplanes.yaml 2>&1
-fi
-
-# -- linstor module
-
-if kubectl get deploy -n cozy-linstor linstor-controller >/dev/null 2>&1; then
-  echo "Collecting linstor resources..."
-  DIR=$REPORT_DIR/linstor
-  mkdir -p $DIR
-  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color n l > $DIR/nodes.txt 2>&1
-  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color sp l > $DIR/storage-pools.txt 2>&1
-  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color r l > $DIR/resources.txt 2>&1
-fi
-
-# -- finalization
-
-echo "Creating archive..."
-tar -czf $REPORT_NAME.tgz -C $REPORT_PDIR .
-echo "Report created: $REPORT_NAME.tgz"
-
-echo "Cleaning up..."
-rm -rf $REPORT_PDIR
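The collector above takes an optional report name as its only argument and leaves a `<name>.tgz` in the current directory. A hedged usage sketch — the script path is hypothetical (the file name is not shown in this view); only the argument handling and the `.tgz` output naming come from the script body itself:

```bash
# Hypothetical path for the removed collector; argument and output naming
# follow the script above.
sh ./collect-report.sh my-cozyreport
tar -tzf my-cozyreport.tgz | head   # list collected files without extracting
```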
hack/e2e-apps.bats (new executable file) — 353 lines added

@@ -0,0 +1,353 @@
+#!/usr/bin/env bats
+
+# -----------------------------------------------------------------------------
+# Cozystack end‑to‑end provisioning test (Bats)
+# -----------------------------------------------------------------------------
+
+@test "Create tenant with isolated mode enabled" {
+  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
+  kubectl create -f - <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: Tenant
+metadata:
+  name: test
+  namespace: tenant-root
+spec:
+  etcd: false
+  host: ""
+  ingress: false
+  isolated: true
+  monitoring: false
+  resourceQuotas: {}
+  seaweedfs: false
+EOF
+  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
+  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
+}
+
+@test "Create a tenant Kubernetes control plane" {
+  kubectl -n tenant-test get kuberneteses.apps.cozystack.io test ||
+  kubectl create -f - <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: Kubernetes
+metadata:
+  name: test
+  namespace: tenant-test
+spec:
+  addons:
+    certManager:
+      enabled: false
+      valuesOverride: {}
+    cilium:
+      valuesOverride: {}
+    fluxcd:
+      enabled: false
+      valuesOverride: {}
+    gatewayAPI:
+      enabled: false
+    gpuOperator:
+      enabled: false
+      valuesOverride: {}
+    ingressNginx:
+      enabled: true
+      hosts: []
+      valuesOverride: {}
+    monitoringAgents:
+      enabled: false
+      valuesOverride: {}
+    verticalPodAutoscaler:
+      valuesOverride: {}
+  controlPlane:
+    apiServer:
+      resources: {}
+      resourcesPreset: small
+    controllerManager:
+      resources: {}
+      resourcesPreset: micro
+    konnectivity:
+      server:
+        resources: {}
+        resourcesPreset: micro
+    replicas: 2
+    scheduler:
+      resources: {}
+      resourcesPreset: micro
+  host: ""
+  nodeGroups:
+    md0:
+      ephemeralStorage: 20Gi
+      gpus: []
+      instanceType: u1.medium
+      maxReplicas: 10
+      minReplicas: 0
+      resources:
+        cpu: ""
+        memory: ""
+      roles:
+        - ingress-nginx
+  storageClass: replicated
+EOF
+  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
+  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
+  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
+  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
+  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
+  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
+  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
+  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
+}
+
+@test "Create a VM Disk" {
+  name='test'
+  kubectl -n tenant-test get vmdisks.apps.cozystack.io $name ||
+  kubectl create -f - <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: VMDisk
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  source:
+    http:
+      url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
+  optical: false
+  storage: 5Gi
+  storageClass: replicated
+EOF
+  sleep 5
+  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
+  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
+  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
+}
+
+@test "Create a VM Instance" {
+  diskName='test'
+  name='test'
+  kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
+  kubectl create -f - <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: VMInstance
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  external: false
+  externalMethod: PortList
+  externalPorts:
+    - 22
+  running: true
+  instanceType: "u1.medium"
+  instanceProfile: ubuntu
+  disks:
+    - name: $diskName
+  gpus: []
+  resources:
+    cpu: ""
+    memory: ""
+  sshKeys:
+    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
+      test@test
+  cloudInit: |
+    #cloud-config
+    users:
+      - name: test
+        shell: /bin/bash
+        sudo: ['ALL=(ALL) NOPASSWD: ALL']
+        groups: sudo
+        ssh_authorized_keys:
+          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
+  cloudInitSeed: ""
+EOF
+  sleep 5
+  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
+  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
+  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
+  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
+  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
+}
+
+@test "Create a Virtual Machine" {
+  name='test'
+  kubectl -n tenant-test get virtualmachines.apps.cozystack.io $name ||
+  kubectl create -f - <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: VirtualMachine
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  external: false
+  externalMethod: PortList
+  externalPorts:
+    - 22
+  instanceType: "u1.medium"
+  instanceProfile: ubuntu
+  systemDisk:
+    image: ubuntu
+    storage: 5Gi
+    storageClass: replicated
+  gpus: []
+  resources:
+    cpu: ""
+    memory: ""
+  sshKeys:
+    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
+      test@test
+  cloudInit: |
+    #cloud-config
+    users:
+      - name: test
+        shell: /bin/bash
+        sudo: ['ALL=(ALL) NOPASSWD: ALL']
+        groups: sudo
+        ssh_authorized_keys:
+          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
+  cloudInitSeed: ""
+EOF
+  sleep 5
+  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
+  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
+  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
+  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
+  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
+  kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
+}
+
+@test "Create DB PostgreSQL" {
+  name='test'
+  kubectl -n tenant-test get postgreses.apps.cozystack.io $name ||
+  kubectl create -f - <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: Postgres
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  external: false
+  size: 10Gi
+  replicas: 2
+  storageClass: ""
+  postgresql:
+    parameters:
+      max_connections: 100
+  quorum:
+    minSyncReplicas: 0
+    maxSyncReplicas: 0
+  users:
+    testuser:
+      password: xai7Wepo
+  databases:
+    testdb:
+      roles:
+        admin:
+          - testuser
+  backup:
+    enabled: false
+    s3Region: us-east-1
+    s3Bucket: s3.example.org/postgres-backups
+    schedule: "0 2 * * *"
+    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
+    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
+    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
+    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+  resources: {}
+  resourcesPreset: "nano"
+EOF
+  sleep 5
+  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
+  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
+  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
+  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
+  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
+  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
+}
+
+@test "Create DB MySQL" {
+  name='test'
+  kubectl -n tenant-test get mysqls.apps.cozystack.io $name ||
+  kubectl create -f- <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: MySQL
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  external: false
+  size: 10Gi
+  replicas: 2
+  storageClass: ""
+  users:
+    testuser:
+      maxUserConnections: 1000
+      password: xai7Wepo
+  databases:
+    testdb:
+      roles:
+        admin:
+          - testuser
+  backup:
+    enabled: false
+    s3Region: us-east-1
+    s3Bucket: s3.example.org/postgres-backups
+    schedule: "0 2 * * *"
+    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
+    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
+    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
+    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+  resources: {}
+  resourcesPreset: "nano"
+EOF
+  sleep 5
+  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
+  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
+  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
+  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
+  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
+  kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
+}
+
+@test "Create DB ClickHouse" {
+  name='test'
+  kubectl -n tenant-test get clickhouses.apps.cozystack.io $name ||
+  kubectl create -f- <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: ClickHouse
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  size: 10Gi
+  logStorageSize: 2Gi
+  shards: 1
+  replicas: 2
+  storageClass: ""
+  logTTL: 15
+  users:
+    testuser:
+      password: xai7Wepo
+  backup:
+    enabled: false
+    s3Region: us-east-1
+    s3Bucket: s3.example.org/clickhouse-backups
+    schedule: "0 2 * * *"
+    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
+    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
+    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
+    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+  resources: {}
+  resourcesPreset: "nano"
+EOF
+  sleep 5
+  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
+  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
+  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
+  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
+  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
+  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
+}
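The new `hack/e2e-apps.bats` consolidates the per-application checks into one suite, which the simplified `test_apps` job drives via `make ... test-apps`. For local debugging, a hedged sketch — it assumes bats-core is installed and `kubectl` points at a Cozystack test cluster:

```bash
# Run the whole suite, or a single test selected by name filter (bats-core).
bats hack/e2e-apps.bats
bats hack/e2e-apps.bats --filter "Create DB PostgreSQL"
```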
Removed file — standalone ClickHouse e2e test (Bats):

@@ -1,41 +0,0 @@
-#!/usr/bin/env bats
-
-@test "Create DB ClickHouse" {
-  name='test'
-  kubectl apply -f- <<EOF
-apiVersion: apps.cozystack.io/v1alpha1
-kind: ClickHouse
-metadata:
-  name: $name
-  namespace: tenant-test
-spec:
-  size: 10Gi
-  logStorageSize: 2Gi
-  shards: 1
-  replicas: 2
-  storageClass: ""
-  logTTL: 15
-  users:
-    testuser:
-      password: xai7Wepo
-  backup:
-    enabled: false
-    s3Region: us-east-1
-    s3Bucket: s3.example.org/clickhouse-backups
-    schedule: "0 2 * * *"
-    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
-    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
-    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
-    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
-  resources: {}
-  resourcesPreset: "nano"
-EOF
-  sleep 5
-  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
-  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
-  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
-  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
-  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
-  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
-  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
-}
@@ -1,51 +0,0 @@
#!/usr/bin/env bats

@test "Create Kafka" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kafka
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  kafka:
    size: 10Gi
    replicas: 2
    storageClass: ""
    resources: {}
    resourcesPreset: "nano"
  zookeeper:
    size: 5Gi
    replicas: 2
    storageClass: ""
    resources:
    resourcesPreset: "nano"
  topics:
    - name: testResults
      partitions: 1
      replicas: 2
      config:
        min.insync.replicas: 2
    - name: testOrders
      config:
        cleanup.policy: compact
        segment.ms: 3600000
        max.compaction.lag.ms: 5400000
        min.insync.replicas: 2
      partitions: 1
      replicas: 2
EOF
  sleep 5
  kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
  kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
  timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
  kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete kafka.apps.cozystack.io $name
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
}
@@ -1,72 +0,0 @@
#!/usr/bin/env bats

@test "Create a tenant Kubernetes control plane" {
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: false
      valuesOverride: {}
    cilium:
      valuesOverride: {}
    fluxcd:
      enabled: false
      valuesOverride: {}
    gatewayAPI:
      enabled: false
    gpuOperator:
      enabled: false
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts: []
      valuesOverride: {}
    monitoringAgents:
      enabled: false
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
  controlPlane:
    apiServer:
      resources: {}
      resourcesPreset: small
    controllerManager:
      resources: {}
      resourcesPreset: micro
    konnectivity:
      server:
        resources: {}
        resourcesPreset: micro
    replicas: 2
    scheduler:
      resources: {}
      resourcesPreset: micro
  host: ""
  nodeGroups:
    md0:
      ephemeralStorage: 20Gi
      gpus: []
      instanceType: u1.medium
      maxReplicas: 10
      minReplicas: 0
      resources:
        cpu: ""
        memory: ""
      roles:
        - ingress-nginx
  storageClass: replicated
EOF
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
}
@@ -1,46 +0,0 @@
#!/usr/bin/env bats

@test "Create DB MySQL" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  users:
    testuser:
      maxUserConnections: 1000
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
  kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}
@@ -1,54 +0,0 @@
#!/usr/bin/env bats

@test "Create DB PostgreSQL" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  postgresql:
    parameters:
      max_connections: 100
  quorum:
    minSyncReplicas: 0
    maxSyncReplicas: 0
  users:
    testuser:
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  # for some reason it takes longer for the read-only endpoint to be ready
  #timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
  kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}
@@ -1,26 +0,0 @@
#!/usr/bin/env bats

@test "Create Redis" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 1Gi
  replicas: 2
  storageClass: ""
  authEnabled: true
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
  kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
  kubectl -n tenant-test delete redis.apps.cozystack.io $name
}
@@ -1,47 +0,0 @@
#!/usr/bin/env bats

@test "Create a Virtual Machine" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  systemDisk:
    image: ubuntu
    storage: 5Gi
    storageClass: replicated
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
      test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}
@@ -1,68 +0,0 @@
#!/usr/bin/env bats

@test "Create a VM Disk" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMDisk
metadata:
  name: $name
  namespace: tenant-test
spec:
  source:
    http:
      url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
  optical: false
  storage: 5Gi
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
  diskName='test'
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  running: true
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  disks:
    - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
      test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}
391 hack/e2e-cluster.bats (Executable file)
@@ -0,0 +1,391 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end‑to‑end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Required installer assets exist" {
  if [ ! -f _out/assets/cozystack-installer.yaml ]; then
    echo "Missing: _out/assets/cozystack-installer.yaml" >&2
    exit 1
  fi

  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi
}

@test "IPv4 forwarding is enabled" {
  if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
    echo "IPv4 forwarding is disabled!" >&2
    echo >&2
    echo "Enable it with:" >&2
    echo "  echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
    exit 1
  fi
}

@test "Clean previous VMs" {
  kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
  rm -rf srv1 srv2 srv3
}

@test "Prepare networking and masquerading" {
  ip link del cozy-br0 2>/dev/null || true
  ip link add cozy-br0 type bridge
  ip link set cozy-br0 up
  ip address add 192.168.123.1/24 dev cozy-br0

  # Masquerading rule – idempotent (delete first, then add)
  iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
  iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}

@test "Prepare cloud‑init drive for VMs" {
  mkdir -p srv1 srv2 srv3

  # Generate cloud‑init ISOs
  for i in 1 2 3; do
    echo "hostname: srv${i}" > "srv${i}/meta-data"

    cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF

    cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1${i}/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOF

    ( cd "srv${i}" && genisoimage \
        -output seed.img \
        -volid cidata -rational-rock -joliet \
        user-data meta-data network-config )
  done
}

@test "Use Talos NoCloud image from assets" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
    exit 1
  fi

  rm -f nocloud-amd64.raw
  cp _out/assets/nocloud-amd64.raw.xz .
  xz --decompress nocloud-amd64.raw.xz
}

@test "Prepare VM disks" {
  for i in 1 2 3; do
    cp nocloud-amd64.raw srv${i}/system.img
    qemu-img resize srv${i}/system.img 50G
    qemu-img create srv${i}/data.img 100G
  done
}

@test "Create tap devices" {
  for i in 1 2 3; do
    ip link del cozy-srv${i} 2>/dev/null || true
    ip tuntap add dev cozy-srv${i} mode tap
    ip link set cozy-srv${i} up
    ip link set cozy-srv${i} master cozy-br0
  done
}

@test "Boot QEMU VMs" {
  for i in 1 2 3; do
    qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
      -device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
      -netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
      -drive file=srv${i}/system.img,if=virtio,format=raw \
      -drive file=srv${i}/seed.img,if=virtio,format=raw \
      -drive file=srv${i}/data.img,if=virtio,format=raw \
      -display none -daemonize -pidfile srv${i}/qemu.pid
  done

  # Give qemu a few seconds to start up networking
  sleep 5
}

@test "Wait until Talos API port 50000 is reachable on all machines" {
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Generate Talos cluster configuration" {
|
||||||
|
# Cluster‑wide patches
|
||||||
|
cat > patch.yaml <<'EOF'
|
||||||
|
machine:
|
||||||
|
kubelet:
|
||||||
|
nodeIP:
|
||||||
|
validSubnets:
|
||||||
|
- 192.168.123.0/24
|
||||||
|
extraConfig:
|
||||||
|
maxPods: 512
|
||||||
|
kernel:
|
||||||
|
modules:
|
||||||
|
- name: openvswitch
|
||||||
|
- name: drbd
|
||||||
|
parameters:
|
||||||
|
- usermode_helper=disabled
|
||||||
|
- name: zfs
|
||||||
|
- name: spl
|
||||||
|
registries:
|
||||||
|
mirrors:
|
||||||
|
docker.io:
|
||||||
|
endpoints:
|
||||||
|
- https://mirror.gcr.io
|
||||||
|
files:
|
||||||
|
- content: |
|
||||||
|
[plugins]
|
||||||
|
[plugins."io.containerd.cri.v1.runtime"]
|
||||||
|
device_ownership_from_security_context = true
|
||||||
|
path: /etc/cri/conf.d/20-customization.part
|
||||||
|
op: create
|
||||||
|
|
||||||
|
cluster:
|
||||||
|
apiServer:
|
||||||
|
extraArgs:
|
||||||
|
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
||||||
|
oidc-client-id: "kubernetes"
|
||||||
|
oidc-username-claim: "preferred_username"
|
||||||
|
oidc-groups-claim: "groups"
|
||||||
|
network:
|
||||||
|
cni:
|
||||||
|
name: none
|
||||||
|
dnsDomain: cozy.local
|
||||||
|
podSubnets:
|
||||||
|
- 10.244.0.0/16
|
||||||
|
serviceSubnets:
|
||||||
|
- 10.96.0.0/16
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Control‑plane‑only patches
|
||||||
|
cat > patch-controlplane.yaml <<'EOF'
|
||||||
|
machine:
|
||||||
|
nodeLabels:
|
||||||
|
node.kubernetes.io/exclude-from-external-load-balancers:
|
||||||
|
$patch: delete
|
||||||
|
network:
|
||||||
|
interfaces:
|
||||||
|
- interface: eth0
|
||||||
|
vip:
|
||||||
|
ip: 192.168.123.10
|
||||||
|
cluster:
|
||||||
|
allowSchedulingOnControlPlanes: true
|
||||||
|
controllerManager:
|
||||||
|
extraArgs:
|
||||||
|
bind-address: 0.0.0.0
|
||||||
|
scheduler:
|
||||||
|
extraArgs:
|
||||||
|
bind-address: 0.0.0.0
|
||||||
|
apiServer:
|
||||||
|
certSANs:
|
||||||
|
- 127.0.0.1
|
||||||
|
proxy:
|
||||||
|
disabled: true
|
||||||
|
discovery:
|
||||||
|
enabled: false
|
||||||
|
etcd:
|
||||||
|
advertisedSubnets:
|
||||||
|
- 192.168.123.0/24
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Generate secrets once
|
||||||
|
if [ ! -f secrets.yaml ]; then
|
||||||
|
talosctl gen secrets
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
||||||
|
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
|
||||||
|
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Apply Talos configuration to the node" {
|
||||||
|
# Apply the configuration to all three nodes
|
||||||
|
for node in 11 12 13; do
|
||||||
|
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
|
||||||
|
done
|
||||||
|
|
||||||
|
# Wait for Talos services to come up again
|
||||||
|
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Bootstrap Talos cluster" {
|
||||||
|
# Bootstrap etcd on the first node
|
||||||
|
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
||||||
|
|
||||||
|
# Wait until etcd is healthy
|
||||||
|
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
|
||||||
|
|
||||||
|
# Retrieve kubeconfig
|
||||||
|
rm -f kubeconfig
|
||||||
|
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
||||||
|
|
||||||
|
# Wait until all three nodes register in Kubernetes
|
||||||
|
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Install Cozystack" {
|
||||||
|
# Create namespace & configmap required by installer
|
||||||
|
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
|
||||||
|
kubectl create configmap cozystack -n cozy-system \
|
||||||
|
--from-literal=bundle-name=paas-full \
|
||||||
|
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
|
||||||
|
--from-literal=ipv4-pod-gateway=10.244.0.1 \
|
||||||
|
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
|
||||||
|
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
|
||||||
|
--from-literal=root-host=example.org \
|
||||||
|
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
|
||||||
|
--dry-run=client -o yaml | kubectl apply -f -
|
||||||
|
|
||||||
|
# Apply installer manifests from file
|
||||||
|
kubectl apply -f _out/assets/cozystack-installer.yaml
|
||||||
|
|
||||||
|
# Wait for the installer deployment to become available
|
||||||
|
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
|
||||||
|
|
||||||
|
# Wait until HelmReleases appear & reconcile them
|
||||||
|
timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
|
||||||
|
sleep 5
|
||||||
|
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
|
||||||
|
|
||||||
|
# Fail the test if any HelmRelease is not Ready
|
||||||
|
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
|
||||||
|
kubectl get hr -A
|
||||||
|
fail "Some HelmReleases failed to reconcile"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Wait for Cluster‑API provider deployments" {
|
||||||
|
# Wait for Cluster‑API provider deployments
|
||||||
|
timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Wait for LINSTOR and configure storage" {
|
||||||
|
# Linstor controller and nodes
|
||||||
|
kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
|
||||||
|
timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
|
||||||
|
|
||||||
|
for node in srv1 srv2 srv3; do
|
||||||
|
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
|
||||||
|
done
|
||||||
|
|
||||||
|
# Storage classes
|
||||||
|
kubectl apply -f - <<'EOF'
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: local
|
||||||
|
annotations:
|
||||||
|
storageclass.kubernetes.io/is-default-class: "true"
|
||||||
|
provisioner: linstor.csi.linbit.com
|
||||||
|
parameters:
|
||||||
|
linstor.csi.linbit.com/storagePool: "data"
|
||||||
|
linstor.csi.linbit.com/layerList: "storage"
|
||||||
|
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
allowVolumeExpansion: true
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: replicated
|
||||||
|
provisioner: linstor.csi.linbit.com
|
||||||
|
parameters:
|
||||||
|
linstor.csi.linbit.com/storagePool: "data"
|
||||||
|
linstor.csi.linbit.com/autoPlace: "3"
|
||||||
|
linstor.csi.linbit.com/layerList: "drbd storage"
|
||||||
|
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
allowVolumeExpansion: true
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Wait for MetalLB and configure address pool" {
|
||||||
|
# MetalLB address pool
|
||||||
|
kubectl apply -f - <<'EOF'
|
||||||
|
---
|
||||||
|
apiVersion: metallb.io/v1beta1
|
||||||
|
kind: L2Advertisement
|
||||||
|
metadata:
|
||||||
|
name: cozystack
|
||||||
|
namespace: cozy-metallb
|
||||||
|
spec:
|
||||||
|
ipAddressPools: [cozystack]
|
||||||
|
---
|
||||||
|
apiVersion: metallb.io/v1beta1
|
||||||
|
kind: IPAddressPool
|
||||||
|
metadata:
|
||||||
|
name: cozystack
|
||||||
|
namespace: cozy-metallb
|
||||||
|
spec:
|
||||||
|
addresses: [192.168.123.200-192.168.123.250]
|
||||||
|
autoAssign: true
|
||||||
|
avoidBuggyIPs: false
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Check Cozystack API service" {
|
||||||
|
kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Configure Tenant and wait for applications" {
|
||||||
|
# Patch root tenant and wait for its releases
|
||||||
|
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'
|
||||||
|
|
||||||
|
timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready
|
||||||
|
|
||||||
|
if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
|
||||||
|
flux reconcile hr monitoring -n tenant-root --force
|
||||||
|
kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Expose Cozystack services through ingress
|
||||||
|
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
|
||||||
|
|
||||||
|
# NGINX ingress controller
|
||||||
|
timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available
|
||||||
|
|
||||||
|
# etcd statefulset
|
||||||
|
kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m
|
||||||
|
|
||||||
|
# VictoriaMetrics components
|
||||||
|
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||||
|
kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||||
|
kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m
|
||||||
|
|
||||||
|
# Grafana
|
||||||
|
kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
|
||||||
|
kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m
|
||||||
|
|
||||||
|
# Verify Grafana via ingress
|
||||||
|
ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||||
|
if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
|
||||||
|
echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Keycloak OIDC stack is healthy" {
|
||||||
|
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'
|
||||||
|
|
||||||
|
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
|
||||||
|
}
|
||||||
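A hedged sketch, not part of the diff above, of how this provisioning suite could be run locally. It assumes bats-core, qemu-system-x86_64, genisoimage, xz, talosctl, kubectl and flux are on PATH and that the assets under `_out/assets/` have already been built; root privileges are needed because the tests create bridges, tap devices, iptables rules and VMs.

```bash
# Hedged sketch (assumptions noted above): run the cluster provisioning tests.
# The suite writes ./kubeconfig via talosctl, so point kubectl at it explicitly.
export KUBECONFIG="$PWD/kubeconfig"
sudo -E bats hack/e2e-cluster.bats
```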
@@ -1,12 +1,5 @@
#!/usr/bin/env bats

@test "Required installer assets exist" {
  if [ ! -f _out/assets/cozystack-installer.yaml ]; then
    echo "Missing: _out/assets/cozystack-installer.yaml" >&2
    exit 1
  fi
}

@test "Install Cozystack" {
  # Create namespace & configmap required by installer
  kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
@@ -27,14 +20,14 @@
  kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available

  # Wait until HelmReleases appear & reconcile them
  timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
  timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
  sleep 5
  kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
  kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex

  # Fail the test if any HelmRelease is not Ready
  if kubectl get hr -A | grep -v " True " | grep -v NAME; then
    kubectl get hr -A
    echo "Some HelmReleases failed to reconcile" >&2
    fail "Some HelmReleases failed to reconcile"
  fi
}

@@ -49,11 +42,7 @@
  kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
  timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'

  created_pools=$(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor sp l -s data --pastable | awk '$2 == "data" {printf " " $4} END{printf " "}')
  for node in srv1 srv2 srv3; do
    case $created_pools in
      *" $node "*) echo "Storage pool 'data' already exists on node $node"; continue;;
    esac
    kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
  done

@@ -166,24 +155,3 @@ EOF
  timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}

@test "Create tenant with isolated mode enabled" {
  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}
@@ -4,6 +4,11 @@
# -----------------------------------------------------------------------------

@test "Required installer assets exist" {
  if [ ! -f _out/assets/cozystack-installer.yaml ]; then
    echo "Missing: _out/assets/cozystack-installer.yaml" >&2
    exit 1
  fi

  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
@@ -136,25 +141,7 @@ machine:
    mirrors:
      docker.io:
        endpoints:
          - https://dockerio.nexus.lllamnyp.su
          - https://mirror.gcr.io
      cr.fluentbit.io:
        endpoints:
          - https://fluentbit.nexus.lllamnyp.su
      docker-registry3.mariadb.com:
        endpoints:
          - https://mariadb.nexus.lllamnyp.su
      gcr.io:
        endpoints:
          - https://gcr.nexus.lllamnyp.su
      ghcr.io:
        endpoints:
          - https://ghcr.nexus.lllamnyp.su
      quay.io:
        endpoints:
          - https://quay.nexus.lllamnyp.su
      registry.k8s.io:
        endpoints:
          - https://k8s.nexus.lllamnyp.su
  files:
    - content: |
        [plugins]
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0
version: 0.10.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -5,7 +5,6 @@ include ../../../scripts/package.mk

generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md
	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

image:
	docker buildx build images/clickhouse-backup \
@@ -1,19 +1,18 @@
# Managed ClickHouse Service
# Managed Clickhouse Service

ClickHouse is an open source high-performance and column-oriented SQL database management system (DBMS).
It is used for online analytical processing (OLAP).
Cozystack platform uses Altinity operator to provide ClickHouse.

### How to restore backup from S3
### How to restore backup:

1. Find the snapshot:
1. Find a snapshot:

```
```bash
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
```

2. Restore it:

```
```bash
restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
```
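One thing the restore snippets above leave implicit is how restic authenticates against the S3 bucket. A hedged sketch, assuming the credentials come from the chart's `backup.*` values shown elsewhere in this diff:

```bash
# Hedged sketch: environment restic expects before running the commands above.
# The example values mirror backup.s3AccessKey, backup.s3SecretKey and backup.resticPassword.
export AWS_ACCESS_KEY_ID='oobaiRus9pah8PhohL1ThaeTa4UVa7gu'
export AWS_SECRET_ACCESS_KEY='ju3eum4dekeich9ahM1te8waeGai0oog'
export RESTIC_PASSWORD='ChaXoveekoh6eigh4siesheeda2quai0'
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
```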
@@ -41,7 +40,7 @@ For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.
### Backup parameters

| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
@@ -50,31 +49,22 @@ For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.
| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | Password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resources` | Explicit CPU/memory resource requests and limits for the Clickhouse service | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
| `resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `nano` |

## Parameter examples and reference

### resources and resourcesPreset
In production environments, it's recommended to set `resources` explicitly.

Example of `resources`:
`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.

```yaml
resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi
```

`resourcesPreset` sets named CPU and memory configurations for each replica.
Allowed values for `resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
This setting is ignored if the corresponding `resources` value is set.
This value is ignored if `resources` value is set.

| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.11.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/cozystack/cozystack/clickhouse-backup:0.10.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
@@ -132,7 +132,11 @@ spec:
      containers:
        - name: clickhouse
          image: clickhouse/clickhouse-server:24.9.2.42
          resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 16 }}
          {{- if .Values.resources }}
          resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 16 }}
          {{- else if ne .Values.resourcesPreset "none" }}
          resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 16 }}
          {{- end }}
          volumeMounts:
            - name: data-volume-template
              mountPath: /var/lib/clickhouse
@@ -79,23 +79,13 @@
        },
        "resources": {
            "type": "object",
            "description": "Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.",
            "description": "Explicit CPU/memory resource requests and limits for the Clickhouse service",
            "default": {}
        },
        "resourcesPreset": {
            "type": "string",
            "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
            "description": "Use a common resources preset when `resources` is not set explicitly.",
            "default": "small",
            "default": "nano"
            "enum": [
                "none",
                "nano",
                "micro",
                "small",
                "medium",
                "large",
                "xlarge",
                "2xlarge"
            ]
        }
    }
}
@@ -47,11 +47,15 @@ backup:
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

## @param resources Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.
## @param resources Explicit CPU/memory resource requests and limits for the Clickhouse service
resources: {}
# resources:
#   limits:
#     cpu: 4000m
#     memory: 4Gi
#   requests:
#     cpu: 100m
#     memory: 512Mi

## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
## @param resourcesPreset Use a common resources preset when `resources` is not set explicitly.
resourcesPreset: "small"
resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.8.0
version: 0.7.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -2,4 +2,3 @@ include ../../../scripts/package.mk

generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md
	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
@@ -1,21 +1,17 @@
# Managed FerretDB Service

FerretDB is an open source MongoDB alternative.
It translates MongoDB wire protocol queries to SQL and can be used as a direct replacement for MongoDB 5.0+.
Internally, FerretDB service is backed by Postgres.

## Parameters

### Common parameters

| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `replicas` | Number of replicas | `2` |
| `replicas` | Number of Postgres replicas | `2` |
| `storageClass` | StorageClass used to store the data | `""` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed | `0` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed. | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas) | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances). | `0` |

### Configuration parameters

@@ -26,8 +22,8 @@ Internally, FerretDB service is backed by Postgres.
### Backup parameters

| Name | Description | Value |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.enabled` | Enable pereiodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
@@ -35,33 +31,7 @@ Internally, FerretDB service is backed by Postgres.
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

## Parameter examples and reference

### resources and resourcesPreset

`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.

```yaml
resources:
  cpu: 4000m
  memory: 4Gi
```

`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.

| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
@@ -18,7 +18,11 @@ spec:
{{- end }}
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}
+{{- if .Values.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
+{{- else if ne .Values.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
+{{- end }}
monitoring:
enablePodMonitor: true

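Both sides of this hunk consume the same two chart values and differ only in which cozy-lib helper applies them. A minimal values sketch using the parameter names documented in the README above; the figures are illustrative, taken from the removed README example, not chart defaults:

```yaml
# Explicit sizing wins when set; otherwise the named preset applies.
resources:
  cpu: 4000m              # illustrative figure from the README example above
  memory: 4Gi
resourcesPreset: "nano"   # ignored once `resources` is non-empty
```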
@@ -14,7 +14,7 @@
|
|||||||
},
|
},
|
||||||
"replicas": {
|
"replicas": {
|
||||||
"type": "number",
|
"type": "number",
|
||||||
"description": "Number of replicas",
|
"description": "Number of Postgres replicas",
|
||||||
"default": 2
|
"default": 2
|
||||||
},
|
},
|
||||||
"storageClass": {
|
"storageClass": {
|
||||||
@@ -27,12 +27,12 @@
|
|||||||
"properties": {
|
"properties": {
|
||||||
"minSyncReplicas": {
|
"minSyncReplicas": {
|
||||||
"type": "number",
|
"type": "number",
|
||||||
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed",
|
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.",
|
||||||
"default": 0
|
"default": 0
|
||||||
},
|
},
|
||||||
"maxSyncReplicas": {
|
"maxSyncReplicas": {
|
||||||
"type": "number",
|
"type": "number",
|
||||||
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)",
|
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).",
|
||||||
"default": 0
|
"default": 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -42,7 +42,7 @@
|
|||||||
"properties": {
|
"properties": {
|
||||||
"enabled": {
|
"enabled": {
|
||||||
"type": "boolean",
|
"type": "boolean",
|
||||||
"description": "Enable periodic backups",
|
"description": "Enable pereiodic backups",
|
||||||
"default": false
|
"default": false
|
||||||
},
|
},
|
||||||
"s3Region": {
|
"s3Region": {
|
||||||
@@ -84,23 +84,13 @@
|
|||||||
},
|
},
|
||||||
"resources": {
|
"resources": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"description": "Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.",
|
"description": "Resources",
|
||||||
"default": {}
|
"default": {}
|
||||||
},
|
},
|
||||||
"resourcesPreset": {
|
"resourcesPreset": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
"default": "nano",
|
"default": "nano"
|
||||||
"enum": [
|
|
||||||
"none",
|
|
||||||
"nano",
|
|
||||||
"micro",
|
|
||||||
"small",
|
|
||||||
"medium",
|
|
||||||
"large",
|
|
||||||
"xlarge",
|
|
||||||
"2xlarge"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
## @param external Enable external access from outside the cluster
|
## @param external Enable external access from outside the cluster
|
||||||
## @param size Persistent Volume size
|
## @param size Persistent Volume size
|
||||||
## @param replicas Number of replicas
|
## @param replicas Number of Postgres replicas
|
||||||
## @param storageClass StorageClass used to store the data
|
## @param storageClass StorageClass used to store the data
|
||||||
##
|
##
|
||||||
external: false
|
external: false
|
||||||
@@ -11,8 +11,8 @@ replicas: 2
|
|||||||
storageClass: ""
|
storageClass: ""
|
||||||
|
|
||||||
## Configuration for the quorum-based synchronous replication
|
## Configuration for the quorum-based synchronous replication
|
||||||
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed
|
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.
|
||||||
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)
|
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).
|
||||||
quorum:
|
quorum:
|
||||||
minSyncReplicas: 0
|
minSyncReplicas: 0
|
||||||
maxSyncReplicas: 0
|
maxSyncReplicas: 0
|
||||||
@@ -31,7 +31,7 @@ users: {}
|
|||||||
|
|
||||||
## @section Backup parameters
|
## @section Backup parameters
|
||||||
|
|
||||||
## @param backup.enabled Enable periodic backups
|
## @param backup.enabled Enable pereiodic backups
|
||||||
## @param backup.s3Region The AWS S3 region where backups are stored
|
## @param backup.s3Region The AWS S3 region where backups are stored
|
||||||
## @param backup.s3Bucket The S3 bucket used for storing backups
|
## @param backup.s3Bucket The S3 bucket used for storing backups
|
||||||
## @param backup.schedule Cron schedule for automated backups
|
## @param backup.schedule Cron schedule for automated backups
|
||||||
@@ -49,11 +49,15 @@ backup:
|
|||||||
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
|
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
|
||||||
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
|
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
|
||||||
|
|
||||||
## @param resources Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.
|
## @param resources Resources
|
||||||
resources: {}
|
resources: {}
|
||||||
# resources:
|
# resources:
|
||||||
|
# limits:
|
||||||
# cpu: 4000m
|
# cpu: 4000m
|
||||||
# memory: 4Gi
|
# memory: 4Gi
|
||||||
|
# requests:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 512Mi
|
||||||
|
|
||||||
## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
|
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
resourcesPreset: "nano"
|
resourcesPreset: "nano"
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.5.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -23,8 +23,6 @@ image-nginx:

generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
-yq -i -o json --indent 4 '.properties.haproxy.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
-yq -i -o json --indent 4 '.properties.nginx.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/chrislim2888/IP2Location-C-Library | awk -F'[/^]' 'END{print $$3}') && \

@@ -1,9 +1,8 @@
-# Managed Nginx-based HTTP Cache Service
+# Managed Nginx Caching Service

-The Nginx-based HTTP caching service is designed to optimize web traffic and enhance web application performance.
+The Nginx Caching Service is designed to optimize web traffic and enhance web application performance. This service combines custom-built Nginx instances with HAproxy for efficient caching and load balancing.
-This service combines custom-built Nginx instances with HAProxy for efficient caching and load balancing.

-## Deployment information
+## Deployment infromation

The Nginx instances include the following modules and features:

@@ -54,67 +53,27 @@ The deployment architecture is illustrated in the diagram below:

## Known issues

-- VTS module shows wrong upstream response time, [github.com/vozlt/nginx-module-vts#198](https://github.com/vozlt/nginx-module-vts/issues/198)
+VTS module shows wrong upstream resonse time
+- https://github.com/vozlt/nginx-module-vts/issues/198

## Parameters

### Common parameters

| Name | Description | Value |
-| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------- |
+| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
-| `haproxy.resources` | Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `haproxy.resources` | Resources | `{}` |
-| `haproxy.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
+| `haproxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
-| `nginx.resources` | Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `nginx.resources` | Resources | `{}` |
-| `nginx.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
+| `nginx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

### Configuration parameters

| Name | Description | Value |
| ----------- | ----------------------- | ----- |
| `endpoints` | Endpoints configuration | `[]` |

-## Parameter examples and reference
-
-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
-```yaml
-resources:
-  cpu: 4000m
-  memory: 4Gi
-```
-
-`resourcesPreset` sets named CPU and memory configurations for each replica.
-This setting is ignored if the corresponding `resources` value is set.
-
-| Preset name | CPU | memory |
-|-------------|--------|---------|
-| `nano` | `250m` | `128Mi` |
-| `micro` | `500m` | `256Mi` |
-| `small` | `1` | `512Mi` |
-| `medium` | `1` | `1Gi` |
-| `large` | `3` | `2Gi` |
-| `xlarge` | `4` | `4Gi` |
-| `2xlarge` | `8` | `8Gi` |
-
-### endpoints
-
-`endpoints` is a flat list of IP addresses:
-
-```yaml
-endpoints:
-- 10.100.3.1:80
-- 10.100.3.11:80
-- 10.100.3.2:80
-- 10.100.3.12:80
-- 10.100.3.3:80
-- 10.100.3.13:80
-```

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.6.0@sha256:b7633717cd7449c0042ae92d8ca9b36e4d69566561f5c7d44e21058e7d05c6d5
+ghcr.io/cozystack/cozystack/nginx-cache:0.5.1@sha256:50ac1581e3100bd6c477a71161cb455a341ffaf9e5e2f6086802e4e25271e8af

@@ -33,7 +33,11 @@ spec:
containers:
- image: haproxy:latest
name: haproxy
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.haproxy.resourcesPreset .Values.haproxy.resources $) | nindent 10 }}
+{{- if .Values.haproxy.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.haproxy.resources $) | nindent 10 }}
+{{- else if ne .Values.haproxy.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.haproxy.resourcesPreset $) | nindent 10 }}
+{{- end }}
ports:
- containerPort: 8080
name: http

@@ -52,7 +52,11 @@ spec:
shareProcessNamespace: true
containers:
- name: nginx
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list $.Values.nginx.resourcesPreset $.Values.nginx.resources $) | nindent 10 }}
+{{- if $.Values.nginx.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list $.Values.nginx.resources $) | nindent 10 }}
+{{- else if ne $.Values.nginx.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list $.Values.nginx.resourcesPreset $) | nindent 10 }}
+{{- end }}
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
readinessProbe:
httpGet:
|
|||||||
@@ -27,23 +27,13 @@
|
|||||||
},
|
},
|
||||||
"resources": {
|
"resources": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"description": "Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.",
|
"description": "Resources",
|
||||||
"default": {}
|
"default": {}
|
||||||
},
|
},
|
||||||
"resourcesPreset": {
|
"resourcesPreset": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
"default": "nano",
|
"default": "nano"
|
||||||
"enum": [
|
|
||||||
"none",
|
|
||||||
"nano",
|
|
||||||
"micro",
|
|
||||||
"small",
|
|
||||||
"medium",
|
|
||||||
"large",
|
|
||||||
"xlarge",
|
|
||||||
"2xlarge"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -57,23 +47,13 @@
|
|||||||
},
|
},
|
||||||
"resources": {
|
"resources": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"description": "Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.",
|
"description": "Resources",
|
||||||
"default": {}
|
"default": {}
|
||||||
},
|
},
|
||||||
"resourcesPreset": {
|
"resourcesPreset": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
"default": "nano",
|
"default": "nano"
|
||||||
"enum": [
|
|
||||||
"none",
|
|
||||||
"nano",
|
|
||||||
"micro",
|
|
||||||
"small",
|
|
||||||
"medium",
|
|
||||||
"large",
|
|
||||||
"xlarge",
|
|
||||||
"2xlarge"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -12,23 +12,31 @@ size: 10Gi
|
|||||||
storageClass: ""
|
storageClass: ""
|
||||||
haproxy:
|
haproxy:
|
||||||
replicas: 2
|
replicas: 2
|
||||||
## @param haproxy.resources Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.
|
## @param haproxy.resources Resources
|
||||||
resources: {}
|
resources: {}
|
||||||
# resources:
|
# resources:
|
||||||
|
# limits:
|
||||||
# cpu: 4000m
|
# cpu: 4000m
|
||||||
# memory: 4Gi
|
# memory: 4Gi
|
||||||
|
# requests:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 512Mi
|
||||||
|
|
||||||
## @param haproxy.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
|
## @param haproxy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
resourcesPreset: "nano"
|
resourcesPreset: "nano"
|
||||||
nginx:
|
nginx:
|
||||||
replicas: 2
|
replicas: 2
|
||||||
## @param nginx.resources Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.
|
## @param nginx.resources Resources
|
||||||
resources: {}
|
resources: {}
|
||||||
# resources:
|
# resources:
|
||||||
|
# limits:
|
||||||
# cpu: 4000m
|
# cpu: 4000m
|
||||||
# memory: 4Gi
|
# memory: 4Gi
|
||||||
|
# requests:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 512Mi
|
||||||
|
|
||||||
## @param nginx.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
|
## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
resourcesPreset: "nano"
|
resourcesPreset: "nano"
|
||||||
|
|
||||||
## @section Configuration parameters
|
## @section Configuration parameters
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.8.0
|
version: 0.7.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
|
|||||||
@@ -2,5 +2,3 @@ include ../../../scripts/package.mk
|
|||||||
|
|
||||||
generate:
|
generate:
|
||||||
readme-generator -v values.yaml -s values.schema.json -r README.md
|
readme-generator -v values.yaml -s values.schema.json -r README.md
|
||||||
yq -i -o json --indent 4 '.properties.kafka.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
|
|
||||||
yq -i -o json --indent 4 '.properties.zookeeper.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
### Common parameters

| Name | Description | Value |
-| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |

@@ -13,58 +13,13 @@
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
-| `kafka.resources` | Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `kafka.resources` | Resources | `{}` |
-| `kafka.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
+| `kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
-| `zookeeper.resources` | Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `zookeeper.resources` | Resources | `{}` |
-| `zookeeper.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
+| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |

### Configuration parameters

| Name | Description | Value |
| -------- | -------------------- | ----- |
| `topics` | Topics configuration | `[]` |

-## Parameter examples and reference
-
-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
-```yaml
-resources:
-  cpu: 4000m
-  memory: 4Gi
-```
-
-`resourcesPreset` sets named CPU and memory configurations for each replica.
-This setting is ignored if the corresponding `resources` value is set.
-
-| Preset name | CPU | memory |
-|-------------|--------|---------|
-| `nano` | `250m` | `128Mi` |
-| `micro` | `500m` | `256Mi` |
-| `small` | `1` | `512Mi` |
-| `medium` | `1` | `1Gi` |
-| `large` | `3` | `2Gi` |
-| `xlarge` | `4` | `4Gi` |
-| `2xlarge` | `8` | `8Gi` |
-
-### topics
-
-```yaml
-topics:
-- name: Results
-  partitions: 1
-  replicas: 3
-  config:
-    min.insync.replicas: 2
-- name: Orders
-  config:
-    cleanup.policy: compact
-    segment.ms: 3600000
-    max.compaction.lag.ms: 5400000
-    min.insync.replicas: 2
-  partitions: 1
-  replicas: 3
-```
|
|||||||
@@ -8,7 +8,11 @@ metadata:
|
|||||||
spec:
|
spec:
|
||||||
kafka:
|
kafka:
|
||||||
replicas: {{ .Values.kafka.replicas }}
|
replicas: {{ .Values.kafka.replicas }}
|
||||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.kafka.resourcesPreset .Values.kafka.resources $) | nindent 6 }}
|
{{- if .Values.kafka.resources }}
|
||||||
|
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.kafka.resources $) | nindent 6 }}
|
||||||
|
{{- else if ne .Values.kafka.resourcesPreset "none" }}
|
||||||
|
resources: {{- include "cozy-lib.resources.preset" (list .Values.kafka.resourcesPreset $) | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
listeners:
|
listeners:
|
||||||
- name: plain
|
- name: plain
|
||||||
port: 9092
|
port: 9092
|
||||||
@@ -66,7 +70,11 @@ spec:
|
|||||||
key: kafka-metrics-config.yml
|
key: kafka-metrics-config.yml
|
||||||
zookeeper:
|
zookeeper:
|
||||||
replicas: {{ .Values.zookeeper.replicas }}
|
replicas: {{ .Values.zookeeper.replicas }}
|
||||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.zookeeper.resourcesPreset .Values.zookeeper.resources $) | nindent 6 }}
|
{{- if .Values.zookeeper.resources }}
|
||||||
|
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.zookeeper.resources $) | nindent 6 }}
|
||||||
|
{{- else if ne .Values.zookeeper.resourcesPreset "none" }}
|
||||||
|
resources: {{- include "cozy-lib.resources.preset" (list .Values.zookeeper.resourcesPreset $) | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
storage:
|
storage:
|
||||||
type: persistent-claim
|
type: persistent-claim
|
||||||
{{- with .Values.zookeeper.size }}
|
{{- with .Values.zookeeper.size }}
|
||||||
|
|||||||
@@ -27,23 +27,13 @@
|
|||||||
},
|
},
|
||||||
"resources": {
|
"resources": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"description": "Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied.",
|
"description": "Resources",
|
||||||
"default": {}
|
"default": {}
|
||||||
},
|
},
|
||||||
"resourcesPreset": {
|
"resourcesPreset": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
"default": "small",
|
"default": "small"
|
||||||
"enum": [
|
|
||||||
"none",
|
|
||||||
"nano",
|
|
||||||
"micro",
|
|
||||||
"small",
|
|
||||||
"medium",
|
|
||||||
"large",
|
|
||||||
"xlarge",
|
|
||||||
"2xlarge"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -67,23 +57,13 @@
|
|||||||
},
|
},
|
||||||
"resources": {
|
"resources": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"description": "Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied.",
|
"description": "Resources",
|
||||||
"default": {}
|
"default": {}
|
||||||
},
|
},
|
||||||
"resourcesPreset": {
|
"resourcesPreset": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
"default": "small",
|
"default": "micro"
|
||||||
"enum": [
|
|
||||||
"none",
|
|
||||||
"nano",
|
|
||||||
"micro",
|
|
||||||
"small",
|
|
||||||
"medium",
|
|
||||||
"large",
|
|
||||||
"xlarge",
|
|
||||||
"2xlarge"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -14,25 +14,35 @@ kafka:
|
|||||||
size: 10Gi
|
size: 10Gi
|
||||||
replicas: 3
|
replicas: 3
|
||||||
storageClass: ""
|
storageClass: ""
|
||||||
## @param kafka.resources Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied.
|
## @param kafka.resources Resources
|
||||||
resources: {}
|
resources: {}
|
||||||
# resources:
|
# resources:
|
||||||
|
# limits:
|
||||||
# cpu: 4000m
|
# cpu: 4000m
|
||||||
# memory: 4Gi
|
# memory: 4Gi
|
||||||
## @param kafka.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
|
# requests:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 512Mi
|
||||||
|
|
||||||
|
## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
resourcesPreset: "small"
|
resourcesPreset: "small"
|
||||||
|
|
||||||
zookeeper:
|
zookeeper:
|
||||||
size: 5Gi
|
size: 5Gi
|
||||||
replicas: 3
|
replicas: 3
|
||||||
storageClass: ""
|
storageClass: ""
|
||||||
## @param zookeeper.resources Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied.
|
## @param zookeeper.resources Resources
|
||||||
resources: {}
|
resources: {}
|
||||||
# resources:
|
# resources:
|
||||||
|
# limits:
|
||||||
# cpu: 4000m
|
# cpu: 4000m
|
||||||
# memory: 4Gi
|
# memory: 4Gi
|
||||||
## @param zookeeper.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
|
# requests:
|
||||||
resourcesPreset: "small"
|
# cpu: 100m
|
||||||
|
# memory: 512Mi
|
||||||
|
|
||||||
|
## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
|
resourcesPreset: "micro"
|
||||||
|
|
||||||
## @section Configuration parameters
|
## @section Configuration parameters
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.25.0
|
version: 0.24.1
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ include ../../../scripts/package.mk
|
|||||||
|
|
||||||
generate:
|
generate:
|
||||||
readme-generator -v values.yaml -s values.schema.json -r README.md
|
readme-generator -v values.yaml -s values.schema.json -r README.md
|
||||||
yq -o json -i '.properties.addons.properties.ingressNginx.properties.exposeMethod.enum = ["Proxied","LoadBalancer"]' values.schema.json
|
|
||||||
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
||||||
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
||||||
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
||||||
|
|||||||
@@ -82,24 +82,24 @@ See the reference for components utilized in this service:
### Common Parameters

| Name | Description | Value |
-| ----------------------- | ----------------------------------------------------------------------------------------------------------------- | ------------ |
+| ----------------------------------- | ----------------------------------------------------------------------------------------------------------------- | ------------ |
| `host` | Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty. | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components. | `2` |
| `storageClass` | StorageClass used to store user data. | `replicated` |
+| `useCustomSecretForPatchContainerd` | if true, for patch containerd will be used secret: {{ .Release.Name }}-patch-containerd | `false` |
| `nodeGroups` | nodeGroups configuration | `{}` |

### Cluster Addons

| Name | Description | Value |
-| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
+| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` |
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
-| `addons.ingressNginx.exposeMethod` | Method to expose the Ingress-NGINX controller. (allowed values: Proxied, LoadBalancer) | `Proxied` |
-| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. Taken into account only when `exposeMethod` is set to `Proxied`. | `[]` |
+| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. | `[]` |
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enable FluxCD | `false` |

@@ -107,48 +107,37 @@ See the reference for components utilized in this service:
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
-| `addons.velero.enabled` | Enable velero for backup and restore k8s cluster. | `false` |
-| `addons.velero.valuesOverride` | Custom values to override | `{}` |

### Kubernetes Control Plane Configuration

| Name | Description | Value |
-| -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------- |
+| -------------------------------------------------- | ---------------------------------------------------------------------------- | -------- |
-| `controlPlane.apiServer.resources` | Explicit CPU and memory configuration for the API Server. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `controlPlane.apiServer.resources` | Explicit CPU/memory resource requests and limits for the API server. | `{}` |
-| `controlPlane.apiServer.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `medium` |
+| `controlPlane.apiServer.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `medium` |
-| `controlPlane.controllerManager.resources` | Explicit CPU and memory configuration for the Controller Manager. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `controlPlane.controllerManager.resources` | Explicit CPU/memory resource requests and limits for the controller manager. | `{}` |
-| `controlPlane.controllerManager.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
+| `controlPlane.controllerManager.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
-| `controlPlane.scheduler.resources` | Explicit CPU and memory configuration for the Scheduler. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `controlPlane.scheduler.resources` | Explicit CPU/memory resource requests and limits for the scheduler. | `{}` |
-| `controlPlane.scheduler.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
+| `controlPlane.scheduler.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
-| `controlPlane.konnectivity.server.resources` | Explicit CPU and memory configuration for Konnectivity. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `controlPlane.konnectivity.server.resources` | Explicit CPU/memory resource requests and limits for the Konnectivity. | `{}` |
-| `controlPlane.konnectivity.server.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
+| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |

+In production environments, it's recommended to set `resources` explicitly.
-## Parameter examples and reference
+Example of `controlPlane.*.resources`:

-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
```yaml
resources:
+  limits:
    cpu: 4000m
    memory: 4Gi
+  requests:
+    cpu: 100m
+    memory: 512Mi
```

-`resourcesPreset` sets named CPU and memory configurations for each replica.
+Allowed values for `controlPlane.*.resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
-This setting is ignored if the corresponding `resources` value is set.
+This value is ignored if the corresponding `resources` value is set.

-| Preset name | CPU | memory |
+## Resources Reference
-|-------------|--------|---------|
-| `nano` | `250m` | `128Mi` |
-| `micro` | `500m` | `256Mi` |
-| `small` | `1` | `512Mi` |
-| `medium` | `1` | `1Gi` |
-| `large` | `3` | `2Gi` |
-| `xlarge` | `4` | `4Gi` |
-| `2xlarge` | `8` | `8Gi` |

### instanceType Resources

@@ -312,3 +301,4 @@ Specific characteristics of this series are:
workload.
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from
the medium size.

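For orientation, the addon parameters the two branches disagree on would be set roughly like this on the branch that documents them. This is a hedged sketch: `exposeMethod` and the `velero` block exist only on one side of this diff, and `example.org` is a placeholder hostname.

```yaml
addons:
  ingressNginx:
    enabled: true
    exposeMethod: Proxied   # or LoadBalancer; present only on the branch that documents it
    hosts:                  # routed by the parent cluster; used with the Proxied method
      - example.org         # placeholder hostname
  velero:
    enabled: true           # backup/restore addon, present only on one side of this diff
    valuesOverride: {}
```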
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.0@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
+ghcr.io/cozystack/cozystack/cluster-autoscaler:0.24.0@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.0@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1
+ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.24.0@sha256:b478952fab735f85c3ba15835012b1de8af5578b33a8a2670eaf532ffc17681e

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.0@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
+ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.24.0@sha256:4d3728b2050d4e0adb00b9f4abbb4a020b29e1a39f24ca1447806fc81f110fa6

@@ -3,7 +3,7 @@ ARG builder_image=docker.io/library/golang:1.22.5
FROM ${builder_image} AS builder
RUN git clone https://github.com/kubevirt/csi-driver /src/kubevirt-csi-driver \
&& cd /src/kubevirt-csi-driver \
-&& git checkout a8d6605bc9997bcfda3fb9f1f82ba6445b4984cc
+&& git checkout 35836e0c8b68d9916d29a838ea60cdd3fc6199cf

ARG TARGETOS
ARG TARGETARCH

@@ -11,7 +11,6 @@ ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH

WORKDIR /src/kubevirt-csi-driver

RUN make build

FROM quay.io/centos/centos:stream9

@@ -120,11 +120,23 @@ metadata:
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
spec:
apiServer:
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.apiServer.resourcesPreset .Values.controlPlane.apiServer.resources $) | nindent 6 }}
+{{- if .Values.controlPlane.apiServer.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.apiServer.resources $) | nindent 6 }}
+{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.apiServer.resourcesPreset $) | nindent 6 }}
+{{- end }}
controllerManager:
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.controllerManager.resourcesPreset .Values.controlPlane.controllerManager.resources $) | nindent 6 }}
+{{- if .Values.controlPlane.controllerManager.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.controllerManager.resources $) | nindent 6 }}
+{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.controllerManager.resourcesPreset $) | nindent 6 }}
+{{- end }}
scheduler:
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.scheduler.resourcesPreset .Values.controlPlane.scheduler.resources $) | nindent 6 }}
+{{- if .Values.controlPlane.scheduler.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.scheduler.resources $) | nindent 6 }}
+{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.scheduler.resourcesPreset $) | nindent 6 }}
+{{- end }}
dataStoreName: "{{ $etcd }}"
addons:
coreDNS:

@@ -133,7 +145,11 @@ spec:
konnectivity:
server:
port: 8132
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.konnectivity.server.resourcesPreset .Values.controlPlane.konnectivity.server.resources $) | nindent 10 }}
+{{- if .Values.controlPlane.konnectivity.server.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.konnectivity.server.resources $) | nindent 10 }}
+{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.konnectivity.server.resourcesPreset $) | nindent 10 }}
+{{- end }}
kubelet:
cgroupfs: systemd
preferredAddressTypes:

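On either branch the values shape for these control-plane components is the same; only the template that renders it differs. A hedged sketch mirroring the `controlPlane.apiServer` example documented in the README hunk above (the figures are illustrative, not defaults):

```yaml
controlPlane:
  apiServer:
    resourcesPreset: medium   # documented default; ignored when resources is set
    resources:
      limits:
        cpu: 4000m            # illustrative figures from the README example
        memory: 4Gi
      requests:
        cpu: 100m
        memory: 512Mi
```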
@@ -1,3 +1,4 @@
+{{- if not .Values.useCustomSecretForPatchContainerd }}
{{- $sourceSecret := lookup "v1" "Secret" "cozy-system" "patch-containerd" }}
{{- if $sourceSecret }}
apiVersion: v1

@@ -11,3 +12,4 @@ data:
{{ printf "%s: %s" $key ($value | quote) | indent 2 }}
{{- end }}
{{- end }}
+{{- end }}

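On the branch that adds the flag, setting `useCustomSecretForPatchContainerd: true` skips copying the `cozy-system/patch-containerd` secret, and per the parameter description in the README hunk earlier the release then expects a user-provided secret named `<release-name>-patch-containerd`. A hedged sketch only: the metadata values are placeholders and the data key is hypothetical; real keys should mirror those of the `cozy-system/patch-containerd` secret.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-cluster-patch-containerd   # <release-name>-patch-containerd; "my-cluster" is a placeholder
  namespace: tenant-example           # placeholder namespace of the tenant cluster release
stringData:
  patch.sh: |                         # hypothetical key name
    # containerd patch payload goes here
```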
@@ -1,4 +1,3 @@
-{{- if .Values.addons.certManager.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:

@@ -55,4 +54,3 @@ stringData:
values: |
{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
{{- end }}
-{{- end }}

@@ -3,11 +3,9 @@ ingress-nginx:
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
-{{- if eq .Values.addons.ingressNginx.exposeMethod "Proxied" }}
hostNetwork: true
service:
enabled: false
-{{- end }}
{{- if not .Values.addons.certManager.enabled }}
admissionWebhooks:
certManager:
|
|||||||
@@ -1,46 +0,0 @@
|
|||||||
{{- if .Values.addons.velero.enabled }}
|
|
||||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
|
||||||
kind: HelmRelease
|
|
||||||
metadata:
|
|
||||||
name: {{ .Release.Name }}-velero
|
|
||||||
labels:
|
|
||||||
cozystack.io/repository: system
|
|
||||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
|
||||||
spec:
|
|
||||||
interval: 5m
|
|
||||||
releaseName: velero
|
|
||||||
chart:
|
|
||||||
spec:
|
|
||||||
chart: cozy-velero
|
|
||||||
reconcileStrategy: Revision
|
|
||||||
sourceRef:
|
|
||||||
kind: HelmRepository
|
|
||||||
name: cozystack-system
|
|
||||||
namespace: cozy-system
|
|
||||||
version: '>= 0.0.0-0'
|
|
||||||
kubeConfig:
|
|
||||||
secretRef:
|
|
||||||
name: {{ .Release.Name }}-admin-kubeconfig
|
|
||||||
key: super-admin.svc
|
|
||||||
targetNamespace: cozy-velero
|
|
||||||
storageNamespace: cozy-velero
|
|
||||||
install:
|
|
||||||
createNamespace: true
|
|
||||||
remediation:
|
|
||||||
retries: -1
|
|
||||||
upgrade:
|
|
||||||
remediation:
|
|
||||||
retries: -1
|
|
||||||
{{- with .Values.addons.velero.valuesOverride }}
|
|
||||||
values:
|
|
||||||
{{- toYaml . | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
dependsOn:
|
|
||||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
|
||||||
- name: {{ .Release.Name }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
{{- end }}
|
|
||||||
- name: {{ .Release.Name }}-cilium
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,6 +1,6 @@
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $ingress := index $myNS.metadata.annotations "namespace.cozystack.io/ingress" }}
-{{- if and (eq .Values.addons.ingressNginx.exposeMethod "Proxied") .Values.addons.ingressNginx.hosts }}
+{{- if .Values.addons.ingressNginx.hosts }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress

@@ -20,12 +20,12 @@
     "properties": {
         "resources": {
             "type": "object",
-            "description": "Explicit CPU and memory configuration for the API Server. When left empty, the preset defined in `resourcesPreset` is applied.",
+            "description": "Explicit CPU/memory resource requests and limits for the API server.",
             "default": {}
         },
         "resourcesPreset": {
             "type": "string",
-            "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
+            "description": "Use a common resources preset when `resources` is not set explicitly.",
             "default": "medium",
             "enum": [
                 "none",
@@ -45,12 +45,12 @@
     "properties": {
         "resources": {
             "type": "object",
-            "description": "Explicit CPU and memory configuration for the Controller Manager. When left empty, the preset defined in `resourcesPreset` is applied.",
+            "description": "Explicit CPU/memory resource requests and limits for the controller manager.",
             "default": {}
         },
         "resourcesPreset": {
             "type": "string",
-            "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
+            "description": "Use a common resources preset when `resources` is not set explicitly.",
             "default": "micro",
             "enum": [
                 "none",
@@ -70,12 +70,12 @@
     "properties": {
         "resources": {
             "type": "object",
-            "description": "Explicit CPU and memory configuration for the Scheduler. When left empty, the preset defined in `resourcesPreset` is applied.",
+            "description": "Explicit CPU/memory resource requests and limits for the scheduler.",
             "default": {}
         },
         "resourcesPreset": {
             "type": "string",
-            "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
+            "description": "Use a common resources preset when `resources` is not set explicitly.",
             "default": "micro",
             "enum": [
                 "none",
@@ -98,12 +98,12 @@
     "properties": {
         "resources": {
             "type": "object",
-            "description": "Explicit CPU and memory configuration for Konnectivity. When left empty, the preset defined in `resourcesPreset` is applied.",
+            "description": "Explicit CPU/memory resource requests and limits for the Konnectivity.",
             "default": {}
         },
         "resourcesPreset": {
             "type": "string",
-            "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
+            "description": "Use a common resources preset when `resources` is not set explicitly.",
             "default": "micro",
             "enum": [
                 "none",
@@ -127,6 +127,11 @@
     "description": "StorageClass used to store user data.",
     "default": "replicated"
 },
+"useCustomSecretForPatchContainerd": {
+    "type": "boolean",
+    "description": "if true, for patch containerd will be used secret: {{ .Release.Name }}-patch-containerd",
+    "default": false
+},
 "addons": {
     "type": "object",
     "properties": {
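
The added `useCustomSecretForPatchContainerd` flag tells the chart to take the containerd patch from an existing secret named `{{ .Release.Name }}-patch-containerd` instead of generating one. A minimal sketch of turning it on; the expected contents of that secret are not part of this diff, so they are not shown:

```yaml
# values for the tenant Kubernetes cluster release
useCustomSecretForPatchContainerd: true
# the chart then looks for a secret called <release-name>-patch-containerd
```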
@@ -178,18 +183,9 @@
             "description": "Custom values to override",
             "default": {}
         },
-        "exposeMethod": {
-            "type": "string",
-            "description": "Method to expose the Ingress-NGINX controller. (allowed values: Proxied, LoadBalancer)",
-            "default": "Proxied",
-            "enum": [
-                "Proxied",
-                "LoadBalancer"
-            ]
-        },
         "hosts": {
             "type": "array",
-            "description": "List of domain names that the parent cluster should route to this tenant cluster. Taken into account only when `exposeMethod` is set to `Proxied`.",
+            "description": "List of domain names that the parent cluster should route to this tenant cluster.",
             "default": [],
             "items": {}
         }
@@ -249,21 +245,6 @@
             "default": {}
         }
     }
-    },
-    "velero": {
-        "type": "object",
-        "properties": {
-            "enabled": {
-                "type": "boolean",
-                "description": "Enable velero for backup and restore k8s cluster.",
-                "default": false
-            },
-            "valuesOverride": {
-                "type": "object",
-                "description": "Custom values to override",
-                "default": {}
-            }
-        }
-    }
 }
 }
 }

@@ -3,9 +3,11 @@
 ## @param host Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty.
 ## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components.
 ## @param storageClass StorageClass used to store user data.
+## @param useCustomSecretForPatchContainerd if true, for patch containerd will be used secret: {{ .Release.Name }}-patch-containerd
 ##
 host: ""
 storageClass: replicated
+useCustomSecretForPatchContainerd: false

 ## @param nodeGroups [object] nodeGroups configuration
 ##
@@ -61,14 +63,12 @@ addons:
   ## @param addons.ingressNginx.valuesOverride Custom values to override
   ##
   enabled: false
-  ## @param addons.ingressNginx.exposeMethod Method to expose the Ingress-NGINX controller. (allowed values: Proxied, LoadBalancer)
-  ## @param addons.ingressNginx.hosts List of domain names that the parent cluster should route to this tenant cluster. Taken into account only when `exposeMethod` is set to `Proxied`.
+  ## @param addons.ingressNginx.hosts List of domain names that the parent cluster should route to this tenant cluster.
   ## e.g:
   ## hosts:
   ## - example.org
   ## - foo.example.net
   ##
-  exposeMethod: Proxied
   hosts: []
   valuesOverride: {}

@@ -105,15 +105,6 @@ addons:
   ##
   valuesOverride: {}

-  ## Velero
-  ##
-  velero:
-    ## @param addons.velero.enabled Enable velero for backup and restore k8s cluster.
-    ## @param addons.velero.valuesOverride Custom values to override
-    ##
-    enabled: false
-    valuesOverride: {}
-
 ## @section Kubernetes Control Plane Configuration
 ##

@@ -121,31 +112,35 @@ controlPlane:
   replicas: 2

   apiServer:
-    ## @param controlPlane.apiServer.resources Explicit CPU and memory configuration for the API Server. When left empty, the preset defined in `resourcesPreset` is applied.
-    ## @param controlPlane.apiServer.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+    ## @param controlPlane.apiServer.resources Explicit CPU/memory resource requests and limits for the API server.
+    ## @param controlPlane.apiServer.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
     ## e.g:
     ## resources:
+    ##   limits:
     ##     cpu: 4000m
     ##     memory: 4Gi
+    ##   requests:
+    ##     cpu: 100m
+    ##     memory: 512Mi
     ##
     resourcesPreset: "medium"
     resources: {}

   controllerManager:
-    ## @param controlPlane.controllerManager.resources Explicit CPU and memory configuration for the Controller Manager. When left empty, the preset defined in `resourcesPreset` is applied.
-    ## @param controlPlane.controllerManager.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+    ## @param controlPlane.controllerManager.resources Explicit CPU/memory resource requests and limits for the controller manager.
+    ## @param controlPlane.controllerManager.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
     resourcesPreset: "micro"
     resources: {}

   scheduler:
-    ## @param controlPlane.scheduler.resources Explicit CPU and memory configuration for the Scheduler. When left empty, the preset defined in `resourcesPreset` is applied.
-    ## @param controlPlane.scheduler.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+    ## @param controlPlane.scheduler.resources Explicit CPU/memory resource requests and limits for the scheduler.
+    ## @param controlPlane.scheduler.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
     resourcesPreset: "micro"
     resources: {}

   konnectivity:
     server:
-      ## @param controlPlane.konnectivity.server.resources Explicit CPU and memory configuration for Konnectivity. When left empty, the preset defined in `resourcesPreset` is applied.
-      ## @param controlPlane.konnectivity.server.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+      ## @param controlPlane.konnectivity.server.resources Explicit CPU/memory resource requests and limits for the Konnectivity.
+      ## @param controlPlane.konnectivity.server.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
       resourcesPreset: "micro"
       resources: {}
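
Both sides keep the same `resources` / `resourcesPreset` pair for every control-plane component; only the wording and the commented hint differ. A minimal sketch of overriding the preset for the API server while leaving the other components on their presets, following the commented example above:

```yaml
controlPlane:
  replicas: 2
  apiServer:
    resourcesPreset: "medium"   # ignored once resources is set
    resources:
      limits:
        cpu: 4000m
        memory: 4Gi
      requests:
        cpu: 100m
        memory: 512Mi
  controllerManager:
    resourcesPreset: "micro"
    resources: {}
```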

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.0
+version: 0.8.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -5,7 +5,6 @@ include ../../../scripts/package.mk

 generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md
-	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

 image:
 	docker buildx build images/mariadb-backup \

@@ -1,7 +1,6 @@
 ## Managed MariaDB Service

-The Managed MariaDB Service offers a powerful and widely used relational database solution.
-This service allows you to create and manage a replicated MariaDB cluster seamlessly.
+The Managed MariaDB Service offers a powerful and widely used relational database solution. This service allows you to create and manage a replicated MariaDB cluster seamlessly.

 ## Deployment Details

@@ -47,7 +46,7 @@ restic -r s3:s3.example.org/mariadb-backups/database_name restore latest --targe
 ```

 more details:
-- https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1
+- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1

 ### Known issues

@@ -85,8 +84,8 @@ more details:
 ### Backup parameters

 | Name | Description | Value |
-| ------------------------ | ---------------------------------------------------------------------------------- | ------------------------------------------------------ |
+| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
-| `backup.enabled` | Enable periodic backups | `false` |
+| `backup.enabled` | Enable pereiodic backups | `false` |
 | `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
 | `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
 | `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
@@ -94,56 +93,6 @@ more details:
 | `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
 | `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
 | `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
-| `resources` | Explicit CPU and memory configuration for each MariaDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
-| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
+| `resources` | Resources | `{}` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

-## Parameter examples and reference
-
-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
-```yaml
-resources:
-  cpu: 4000m
-  memory: 4Gi
-```
-
-`resourcesPreset` sets named CPU and memory configurations for each replica.
-This setting is ignored if the corresponding `resources` value is set.
-
-| Preset name | CPU | memory |
-|-------------|--------|---------|
-| `nano` | `250m` | `128Mi` |
-| `micro` | `500m` | `256Mi` |
-| `small` | `1` | `512Mi` |
-| `medium` | `1` | `1Gi` |
-| `large` | `3` | `2Gi` |
-| `xlarge` | `4` | `4Gi` |
-| `2xlarge` | `8` | `8Gi` |
-
-### users
-
-```yaml
-users:
-  user1:
-    maxUserConnections: 1000
-    password: hackme
-  user2:
-    maxUserConnections: 1000
-    password: hackme
-```
-
-
-### databases
-
-```yaml
-databases:
-  myapp1:
-    roles:
-      admin:
-      - user1
-      readonly:
-      - user2
-```

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/mariadb-backup:0.9.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
+ghcr.io/cozystack/cozystack/mariadb-backup:0.8.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4

@@ -61,9 +61,7 @@ spec:
     metadata:
       labels:
         app.kubernetes.io/instance: {{ $.Release.Name }}
-  {{- if and .Values.external (eq (int .Values.replicas) 1) }}
-  type: LoadBalancer
-  {{- end }}
   storage:
     size: {{ .Values.size }}
     resizeInUseVolumes: true
@@ -72,7 +70,7 @@ spec:
     storageClassName: {{ . }}
     {{- end }}

-  {{- if and .Values.external (gt (int .Values.replicas) 1) }}
+  {{- if .Values.external }}
   primaryService:
     type: LoadBalancer
   {{- end }}
@@ -80,4 +78,8 @@ spec:
   #secondaryService:
   #  type: LoadBalancer

-  resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}
+  {{- if .Values.resources }}
+  resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
+  {{- else if ne .Values.resourcesPreset "none" }}
+  resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
+  {{- end }}
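
On the left-hand side the service type and the resources block are derived from `external`, `replicas`, `resources`, and `resourcesPreset` via the cozy-lib helpers; the right-hand side keeps only the plain `external` check for the primary service. A minimal sketch of the values this template consumes (the `size` value is illustrative, not a default from this diff):

```yaml
external: true         # expose the database outside the cluster via LoadBalancer
replicas: 2
size: 10Gi             # persistent volume size, example value
resourcesPreset: nano  # used only while resources is left empty
resources: {}
```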

@@ -27,7 +27,7 @@
 "properties": {
     "enabled": {
         "type": "boolean",
-        "description": "Enable periodic backups",
+        "description": "Enable pereiodic backups",
         "default": false
     },
     "s3Region": {
@@ -69,23 +69,13 @@
     },
     "resources": {
         "type": "object",
-        "description": "Explicit CPU and memory configuration for each MariaDB replica. When left empty, the preset defined in `resourcesPreset` is applied.",
+        "description": "Resources",
         "default": {}
     },
     "resourcesPreset": {
         "type": "string",
-        "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
-        "default": "nano",
-        "enum": [
-            "none",
-            "nano",
-            "micro",
-            "small",
-            "medium",
-            "large",
-            "xlarge",
-            "2xlarge"
-        ]
+        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+        "default": "nano"
     }
 }
 }
@@ -37,7 +37,7 @@ databases: {}

 ## @section Backup parameters

-## @param backup.enabled Enable periodic backups
+## @param backup.enabled Enable pereiodic backups
 ## @param backup.s3Region The AWS S3 region where backups are stored
 ## @param backup.s3Bucket The S3 bucket used for storing backups
 ## @param backup.schedule Cron schedule for automated backups
@@ -55,11 +55,15 @@ backup:
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

-## @param resources Explicit CPU and memory configuration for each MariaDB replica. When left empty, the preset defined in `resourcesPreset` is applied.
+## @param resources Resources
 resources: {}
 # resources:
+#   limits:
 #     cpu: 4000m
 #     memory: 4Gi
+#   requests:
+#     cpu: 100m
+#     memory: 512Mi

-## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
 resourcesPreset: "nano"
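
Both variants accept the same shape for explicit resources; only the commented hint differs (flat CPU/memory on one side, limits/requests on the other). A minimal sketch of a MariaDB release that pins explicit resources, following the commented example above:

```yaml
resourcesPreset: "nano"  # ignored because resources is set below
resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi
```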

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.0
+version: 0.7.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -2,4 +2,3 @@ include ../../../scripts/package.mk

 generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md
-	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

@@ -1,14 +1,11 @@
 # Managed NATS Service

-NATS is an open-source, simple, secure, and high performance messaging system.
-It provides a data layer for cloud native applications, IoT messaging, and microservices architectures.
-
 ## Parameters

 ### Common parameters

 | Name | Description | Value |
-| ------------------- | -------------------------------------------------------------------------------- | ------- |
+| ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
 | `external` | Enable external access from outside the cluster | `false` |
 | `replicas` | Persistent Volume size for NATS | `2` |
 | `storageClass` | StorageClass used to store the data | `""` |
@@ -17,32 +14,5 @@ It provides a data layer for cloud native applications, IoT messaging, and micro
 | `jetstream.enabled` | Enable or disable Jetstream | `true` |
 | `config.merge` | Additional configuration to merge into NATS config | `{}` |
 | `config.resolver` | Additional configuration to merge into NATS config | `{}` |
-| `resources` | Explicit CPU and memory configuration for each NATS replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
-| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
+| `resources` | Resources | `{}` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
-
-## Parameter examples and reference
-
-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
-```yaml
-resources:
-  cpu: 4000m
-  memory: 4Gi
-```
-
-`resourcesPreset` sets named CPU and memory configurations for each replica.
-This setting is ignored if the corresponding `resources` value is set.
-
-| Preset name | CPU | memory |
-|-------------|--------|---------|
-| `nano` | `250m` | `128Mi` |
-| `micro` | `500m` | `256Mi` |
-| `small` | `1` | `512Mi` |
-| `medium` | `1` | `1Gi` |
-| `large` | `3` | `2Gi` |
-| `xlarge` | `4` | `4Gi` |
-| `2xlarge` | `8` | `8Gi` |
@@ -46,7 +46,11 @@ spec:
       containers:
       - name: nats
         image: nats:2.10.17-alpine
-        resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 22 }}
+        {{- if .Values.resources }}
+        resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 22 }}
+        {{- else if ne .Values.resourcesPreset "none" }}
+        resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 22 }}
+        {{- end }}
 fullnameOverride: {{ .Release.Name }}
 config:
   cluster:
@@ -49,23 +49,13 @@
     },
     "resources": {
         "type": "object",
-        "description": "Explicit CPU and memory configuration for each NATS replica. When left empty, the preset defined in `resourcesPreset` is applied.",
+        "description": "Resources",
         "default": {}
     },
     "resourcesPreset": {
         "type": "string",
-        "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
-        "default": "nano",
-        "enum": [
-            "none",
-            "nano",
-            "micro",
-            "small",
-            "medium",
-            "large",
-            "xlarge",
-            "2xlarge"
-        ]
+        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+        "default": "nano"
     }
 }
 }
@@ -62,11 +62,15 @@ config:
   ## Example see: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml#L247
   resolver: {}

-## @param resources Explicit CPU and memory configuration for each NATS replica. When left empty, the preset defined in `resourcesPreset` is applied.
+## @param resources Resources
 resources: {}
 # resources:
+#   limits:
 #     cpu: 4000m
 #     memory: 4Gi
+#   requests:
+#     cpu: 100m
+#     memory: 512Mi

-## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
 resourcesPreset: "nano"
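
The NATS chart follows the same convention as the other managed apps: `resourcesPreset` applies only while `resources` stays empty. A minimal sketch of NATS values combining the parameters listed in the README above:

```yaml
external: false
replicas: 2
jetstream:
  enabled: true
resourcesPreset: "nano"
resources: {}
config:
  merge: {}     # extra NATS config to merge, see the upstream example linked above
  resolver: {}
```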

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.16.0
+version: 0.14.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -1,5 +1,24 @@
+POSTGRES_BACKUP_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
+
+include ../../../scripts/common-envs.mk
 include ../../../scripts/package.mk

 generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md
-	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
+
+image:
+	docker buildx build images/postgres-backup \
+		--provenance false \
+		--builder=$(BUILDER) \
+		--platform=$(PLATFORM) \
+		--tag $(REGISTRY)/postgres-backup:$(call settag,$(POSTGRES_BACKUP_TAG)) \
+		--cache-from type=registry,ref=$(REGISTRY)/postgres-backup:latest \
+		--cache-to type=inline \
+		--metadata-file images/postgres-backup.json \
+		--push=$(PUSH) \
+		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
+		--load=$(LOAD)
+	echo "$(REGISTRY)/postgres-backup:$(call settag,$(POSTGRES_BACKUP_TAG))@$$(yq e '."containerimage.digest"' images/postgres-backup.json -o json -r)" \
+		> images/postgres-backup.tag
+	cp images/postgres-backup.tag ../ferretdb/images/
+	rm -f images/postgres-backup.json
@@ -1,8 +1,6 @@
 # Managed PostgreSQL Service

-PostgreSQL is currently the leading choice among relational databases, known for its robust features and performance.
-The Managed PostgreSQL Service takes advantage of platform-side implementation to provide a self-healing replicated cluster.
-This cluster is efficiently managed using the highly acclaimed CloudNativePG operator, which has gained popularity within the community.
+PostgreSQL is currently the leading choice among relational databases, known for its robust features and performance. Our Managed PostgreSQL Service takes advantage of platform-side implementation to provide a self-healing replicated cluster. This cluster is efficiently managed using the highly acclaimed CloudNativePG operator, which has gained popularity within the community.

 ## Deployment Details

@@ -13,7 +11,7 @@ This managed service is controlled by the CloudNativePG operator, ensuring effic

 ## HowTos

-### How to switch primary/secondary replica
+### How to switch master/slave replica

 See:

@@ -35,7 +33,7 @@ restic -r s3:s3.example.org/postgres-backups/database_name restore latest --targ

 more details:

-- <https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1>
+- <https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1>

 ## Parameters

@@ -61,81 +59,14 @@ more details:
 ### Backup parameters

 | Name | Description | Value |
-| ------------------------ | -------------------------------------------------------------------- | ----------------------------------- |
+| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
 | `backup.enabled` | Enable pereiodic backups | `false` |
-| `backup.schedule` | Cron schedule for automated backups | `0 2 * * * *` |
-| `backup.retentionPolicy` | The retention policy | `30d` |
-| `backup.destinationPath` | The path where to store the backup (i.e. s3://bucket/path/to/folder) | `s3://BUCKET_NAME/` |
-| `backup.endpointURL` | Endpoint to be used to upload data to the cloud | `http://minio-gateway-service:9000` |
+| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
+| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
+| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
+| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
 | `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
 | `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
+| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
-
-### Bootstrap parameters
+| `resources` | Resources | `{}` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
-
-| Name | Description | Value |
-| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------- | ------- |
-| `bootstrap.enabled` | Restore cluster from backup | `false` |
-| `bootstrap.recoveryTime` | Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest | `""` |
-| `bootstrap.oldName` | Name of cluster before deleting | `""` |
-| `resources` | Explicit CPU and memory configuration for each PostgreSQL replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
-| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
-
-
-## Parameter examples and reference
-
-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
-```yaml
-resources:
-  cpu: 4000m
-  memory: 4Gi
-```
-
-`resourcesPreset` sets named CPU and memory configurations for each replica.
-This setting is ignored if the corresponding `resources` value is set.
-
-| Preset name | CPU | memory |
-|-------------|--------|---------|
-| `nano` | `250m` | `128Mi` |
-| `micro` | `500m` | `256Mi` |
-| `small` | `1` | `512Mi` |
-| `medium` | `1` | `1Gi` |
-| `large` | `3` | `2Gi` |
-| `xlarge` | `4` | `4Gi` |
-| `2xlarge` | `8` | `8Gi` |
-
-### users
-
-```yaml
-users:
-  user1:
-    password: strongpassword
-  user2:
-    password: hackme
-  airflow:
-    password: qwerty123
-  debezium:
-    replication: true
-```
-
-### databases
-
-```yaml
-databases:
-  myapp:
-    roles:
-      admin:
-      - user1
-      - debezium
-      readonly:
-      - user2
-  airflow:
-    roles:
-      admin:
-      - airflow
-    extensions:
-    - hstore
-```
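
The right-hand column drops the `bootstrap.*` parameters together with the barman-based recovery flow shown later in this diff. For reference, a minimal sketch of a recovery configuration as documented on the left-hand side (the cluster name is a placeholder):

```yaml
bootstrap:
  enabled: true
  # name of the cluster the backup was taken from, before it was deleted
  oldName: old-cluster-name
  # RFC 3339 timestamp to recover up to; leave empty to restore the latest state
  recoveryTime: ""
```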
1 packages/apps/postgres/images/postgres-backup.tag Normal file
@@ -0,0 +1 @@
+ghcr.io/cozystack/cozystack/postgres-backup:0.14.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

2 packages/apps/postgres/images/postgres-backup/Dockerfile Normal file
@@ -0,0 +1,2 @@
+FROM alpine:3.22
+RUN apk add --no-cache postgresql17-client uuidgen restic
99 packages/apps/postgres/templates/backup-cronjob.yaml Normal file
@@ -0,0 +1,99 @@
+{{- if .Values.backup.enabled }}
+{{ $image := .Files.Get "images/backup.json" | fromJson }}
+
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: {{ .Release.Name }}-backup
+spec:
+  schedule: "{{ .Values.backup.schedule }}"
+  concurrencyPolicy: Forbid
+  successfulJobsHistoryLimit: 3
+  failedJobsHistoryLimit: 3
+  jobTemplate:
+    spec:
+      backoffLimit: 2
+      template:
+        metadata:
+          annotations:
+            checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
+            checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
+        spec:
+          imagePullSecrets:
+          - name: {{ .Release.Name }}-regsecret
+          restartPolicy: OnFailure
+          containers:
+          - name: pgdump
+            image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
+            command:
+            - /bin/sh
+            - /scripts/backup.sh
+            env:
+            - name: REPO_PREFIX
+              value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
+            - name: CLEANUP_STRATEGY
+              value: {{ required "cleanupStrategy is not specified!" .Values.backup.cleanupStrategy | quote }}
+            - name: PGUSER
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Release.Name }}-superuser
+                  key: username
+            - name: PGPASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Release.Name }}-superuser
+                  key: password
+            - name: PGHOST
+              value: {{ .Release.Name }}-rw
+            - name: PGPORT
+              value: "5432"
+            - name: PGDATABASE
+              value: postgres
+            - name: AWS_ACCESS_KEY_ID
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Release.Name }}-backup
+                  key: s3AccessKey
+            - name: AWS_SECRET_ACCESS_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Release.Name }}-backup
+                  key: s3SecretKey
+            - name: AWS_DEFAULT_REGION
+              value: {{ .Values.backup.s3Region }}
+            - name: RESTIC_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Release.Name }}-backup
+                  key: resticPassword
+            volumeMounts:
+            - mountPath: /scripts
+              name: scripts
+            - mountPath: /tmp
+              name: tmp
+            - mountPath: /.cache
+              name: cache
+            securityContext:
+              allowPrivilegeEscalation: false
+              capabilities:
+                drop:
+                - ALL
+              privileged: false
+              readOnlyRootFilesystem: true
+              runAsNonRoot: true
+            {{- include "postgresjobs.resources" . | nindent 12 }}
+          volumes:
+          - name: scripts
+            secret:
+              secretName: {{ .Release.Name }}-backup-script
+          - name: tmp
+            emptyDir: {}
+          - name: cache
+            emptyDir: {}
+          securityContext:
+            runAsNonRoot: true
+            runAsUser: 9000
+            runAsGroup: 9000
+            seccompProfile:
+              type: RuntimeDefault
+{{- end }}
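
This CronJob only renders when backups are switched on and all required S3/restic credentials are provided. A minimal sketch of the values that satisfy the `required` checks above, using the documented defaults (the keys and passwords are placeholders from the chart, not real credentials):

```yaml
backup:
  enabled: true
  s3Region: us-east-1
  s3Bucket: s3.example.org/postgres-backups
  schedule: "0 2 * * *"
  cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
```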
50 packages/apps/postgres/templates/backup-script.yaml Normal file
@@ -0,0 +1,50 @@
+{{- if .Values.backup.enabled }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .Release.Name }}-backup-script
+stringData:
+  backup.sh: |
+    #!/bin/sh
+    set -e
+    set -o pipefail
+
+    JOB_ID="job-$(uuidgen|cut -f1 -d-)"
+    DB_LIST=$(psql -Atq -c 'SELECT datname FROM pg_catalog.pg_database;' | grep -v '^\(postgres\|app\|template.*\)$')
+    echo DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle list
+    echo "Job ID: $JOB_ID"
+    echo "Target repo: $REPO_PREFIX"
+    echo "Cleanup strategy: $CLEANUP_STRATEGY"
+    echo "Start backup for:"
+    echo "$DB_LIST"
+    echo
+    echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
+    for db in $DB_LIST; do
+      (
+        set -x
+        restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
+          restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
+        restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
+        pg_dump -Z0 -Ft -d "$db" | \
+          restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.tar
+        restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
+      )
+    done
+    echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
+
+    echo
+    echo "Run cleanup:"
+    echo
+
+    echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
+    for db in $DB_LIST; do
+      (
+        set -x
+        restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
+        restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
+        restic prune -r "s3:${REPO_PREFIX}/$db"
+      )
+    done
+    echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
+{{- end }}
@@ -1,10 +1,11 @@
-{{- if or .Values.backup.enabled .Values.bootstrap.enabled }}
+{{- if .Values.backup.enabled }}
 ---
 apiVersion: v1
 kind: Secret
 metadata:
-  name: {{ .Release.Name }}-s3-creds
+  name: {{ .Release.Name }}-backup
 stringData:
-  AWS_ACCESS_KEY_ID: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey | quote }}
-  AWS_SECRET_ACCESS_KEY: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey | quote }}
+  s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey }}
+  s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey }}
+  resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword }}
 {{- end }}

@@ -1,12 +0,0 @@
-{{- if .Values.backup.enabled }}
----
-apiVersion: postgresql.cnpg.io/v1
-kind: ScheduledBackup
-metadata:
-  name: {{ .Release.Name }}
-spec:
-  schedule: {{ .Values.backup.schedule | quote }}
-  backupOwnerReference: self
-  cluster:
-    name: {{ .Release.Name }}
-{{- end }}
@@ -5,45 +5,12 @@ metadata:
   name: {{ .Release.Name }}
 spec:
   instances: {{ .Values.replicas }}
-  {{- if .Values.backup.enabled }}
-  backup:
-    barmanObjectStore:
-      destinationPath: {{ .Values.backup.destinationPath }}
-      endpointURL: {{ .Values.backup.endpointURL }}
-      s3Credentials:
-        accessKeyId:
-          name: {{ .Release.Name }}-s3-creds
-          key: AWS_ACCESS_KEY_ID
-        secretAccessKey:
-          name: {{ .Release.Name }}-s3-creds
-          key: AWS_SECRET_ACCESS_KEY
-    retentionPolicy: {{ .Values.backup.retentionPolicy }}
+  {{- if .Values.resources }}
+  resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
+  {{- else if ne .Values.resourcesPreset "none" }}
+  resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
   {{- end }}
-
-  {{- if .Values.bootstrap.enabled }}
-  bootstrap:
-    recovery:
-      source: {{ .Values.bootstrap.oldName }}
-      {{- if .Values.bootstrap.recoveryTime }}
-      recoveryTarget:
-        targetTime: {{ .Values.bootstrap.recoveryTime }}
-      {{- end }}
-  externalClusters:
-  - name: {{ .Values.bootstrap.oldName }}
-    barmanObjectStore:
-      destinationPath: {{ .Values.backup.destinationPath }}
-      endpointURL: {{ .Values.backup.endpointURL }}
-      s3Credentials:
-        accessKeyId:
-          name: {{ .Release.Name }}-s3-creds
-          key: AWS_ACCESS_KEY_ID
-        secretAccessKey:
-          name: {{ .Release.Name }}-s3-creds
-          key: AWS_SECRET_ACCESS_KEY
-  {{- end }}
-
-  resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}

   enableSuperuserAccess: true
   {{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
   {{- if $configMap }}
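
The removed barman-based section wired the CNPG cluster to object storage directly; the values it reads are the same ones deleted from the README and schema above. A minimal sketch of those values on the side that still supports them, using the documented defaults (bucket, endpoint, and credentials are placeholders):

```yaml
backup:
  enabled: true
  schedule: "0 2 * * * *"
  retentionPolicy: 30d
  destinationPath: s3://BUCKET_NAME/
  endpointURL: http://minio-gateway-service:9000
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
```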
@@ -41,10 +41,10 @@ stringData:
     {{- if .Values.users }}
     psql -v ON_ERROR_STOP=1 <<\EOT
     {{- range $user, $u := .Values.users }}
-    SELECT 'CREATE ROLE "{{ $user }}" LOGIN INHERIT;'
+    SELECT 'CREATE ROLE {{ $user }} LOGIN INHERIT;'
     WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $user }}')\gexec
-    ALTER ROLE "{{ $user }}" WITH PASSWORD '{{ index $passwords $user }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
-    COMMENT ON ROLE "{{ $user }}" IS 'user managed by helm';
+    ALTER ROLE {{ $user }} WITH PASSWORD '{{ index $passwords $user }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
+    COMMENT ON ROLE {{ $user }} IS 'user managed by helm';
     {{- end }}
     EOT
     {{- end }}
@@ -68,15 +68,15 @@ stringData:
     {{- if .Values.databases }}
     psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
     {{- range $database, $d := .Values.databases }}
-    SELECT 'CREATE DATABASE "{{ $database }}"'
+    SELECT 'CREATE DATABASE {{ $database }}'
     WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '{{ $database }}')\gexec
-    COMMENT ON DATABASE "{{ $database }}" IS 'database managed by helm';
-    SELECT 'CREATE ROLE "{{ $database }}_admin" NOINHERIT;'
+    COMMENT ON DATABASE {{ $database }} IS 'database managed by helm';
+    SELECT 'CREATE ROLE {{ $database }}_admin NOINHERIT;'
     WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $database }}_admin')\gexec
-    COMMENT ON ROLE "{{ $database }}_admin" IS 'role managed by helm';
-    SELECT 'CREATE ROLE "{{ $database }}_readonly" NOINHERIT;'
+    COMMENT ON ROLE {{ $database }}_admin IS 'role managed by helm';
+    SELECT 'CREATE ROLE {{ $database }}_readonly NOINHERIT;'
     WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $database }}_readonly')\gexec
-    COMMENT ON ROLE "{{ $database }}_readonly" IS 'role managed by helm';
+    COMMENT ON ROLE {{ $database }}_readonly IS 'role managed by helm';
     {{- end }}
     EOT
     {{- end }}
@@ -84,8 +84,8 @@ stringData:
     echo "== grant privileges on databases to roles"
     {{- range $database, $d := .Values.databases }}
     psql -v ON_ERROR_STOP=1 --echo-all -d "{{ $database }}" <<\EOT
-    ALTER DATABASE "{{ $database }}" OWNER TO "{{ $database }}_admin";
-    GRANT CONNECT ON DATABASE "{{ $database }}" TO "{{ $database }}_readonly";
+    ALTER DATABASE {{ $database }} OWNER TO {{ $database }}_admin;
+    GRANT CONNECT ON DATABASE {{ $database }} TO {{ $database }}_readonly;

     DO $$
     DECLARE
@@ -165,14 +165,14 @@ stringData:
     {{- range $database, $d := .Values.databases }}
     {{- range $user, $u := $.Values.users }}
     {{- if has $user $d.roles.admin }}
-    GRANT "{{ $database }}_admin" TO "{{ $user }}";
+    GRANT {{ $database }}_admin TO {{ $user }};
     {{- else }}
-    REVOKE "{{ $database }}_admin" FROM "{{ $user }}";
+    REVOKE {{ $database }}_admin FROM {{ $user }};
     {{- end }}
     {{- if has $user $d.roles.readonly }}
-    GRANT "{{ $database }}_readonly" TO "{{ $user }}";
+    GRANT {{ $database }}_readonly TO {{ $user }};
     {{- else }}
-    REVOKE "{{ $database }}_readonly" FROM "{{ $user }}";
+    REVOKE {{ $database }}_readonly FROM {{ $user }};
     {{- end }}
     {{- end }}
     {{- end }}
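
The left-hand side double-quotes every role and database identifier before it reaches `psql`, which matters as soon as a key under `users` or `databases` is not a plain lowercase identifier. A small sketch of values where the quoting makes a difference; `my-app` is a hypothetical name containing a hyphen:

```yaml
users:
  my-app:            # hyphenated name: needs the quoted form "my-app" in SQL
    password: hackme
databases:
  my-app:
    roles:
      admin:
      - my-app
```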

@@ -65,25 +65,25 @@
     "description": "Enable pereiodic backups",
     "default": false
 },
+"s3Region": {
+    "type": "string",
+    "description": "The AWS S3 region where backups are stored",
+    "default": "us-east-1"
+},
+"s3Bucket": {
+    "type": "string",
+    "description": "The S3 bucket used for storing backups",
+    "default": "s3.example.org/postgres-backups"
+},
 "schedule": {
     "type": "string",
     "description": "Cron schedule for automated backups",
-    "default": "0 2 * * * *"
+    "default": "0 2 * * *"
 },
-"retentionPolicy": {
+"cleanupStrategy": {
     "type": "string",
-    "description": "The retention policy",
-    "default": "30d"
-},
-"destinationPath": {
-    "type": "string",
-    "description": "The path where to store the backup (i.e. s3://bucket/path/to/folder)",
-    "default": "s3://BUCKET_NAME/"
-},
-"endpointURL": {
-    "type": "string",
-    "description": "Endpoint to be used to upload data to the cloud",
-    "default": "http://minio-gateway-service:9000"
+    "description": "The strategy for cleaning up old backups",
+    "default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
 },
 "s3AccessKey": {
     "type": "string",
@@ -94,48 +94,23 @@
 "type": "string",
 "description": "The secret key for S3, used for authentication",
 "default": "ju3eum4dekeich9ahM1te8waeGai0oog"
-}
-}
 },
-"bootstrap": {
-    "type": "object",
-    "properties": {
-        "enabled": {
-            "type": "boolean",
-            "description": "Restore cluster from backup",
-            "default": false
-        },
-        "recoveryTime": {
+"resticPassword": {
     "type": "string",
-    "description": "Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest",
-    "default": ""
-},
-"oldName": {
-    "type": "string",
-    "description": "Name of cluster before deleting",
-    "default": ""
+    "description": "The password for Restic backup encryption",
+    "default": "ChaXoveekoh6eigh4siesheeda2quai0"
 }
 }
 },
 "resources": {
     "type": "object",
-    "description": "Explicit CPU and memory configuration for each PostgreSQL replica. When left empty, the preset defined in `resourcesPreset` is applied.",
+    "description": "Resources",
     "default": {}
 },
 "resourcesPreset": {
     "type": "string",
-    "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
-    "default": "micro",
-    "enum": [
-        "none",
-        "nano",
-        "micro",
-        "small",
-        "medium",
-        "large",
-        "xlarge",
-        "2xlarge"
-    ]
+    "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+    "default": "nano"
 }
 }
 }
@@ -60,38 +60,32 @@ databases: {}
 ## @section Backup parameters
 
 ## @param backup.enabled Enable pereiodic backups
+## @param backup.s3Region The AWS S3 region where backups are stored
+## @param backup.s3Bucket The S3 bucket used for storing backups
 ## @param backup.schedule Cron schedule for automated backups
-## @param backup.retentionPolicy The retention policy
-## @param backup.destinationPath The path where to store the backup (i.e. s3://bucket/path/to/folder)
-## @param backup.endpointURL Endpoint to be used to upload data to the cloud
+## @param backup.cleanupStrategy The strategy for cleaning up old backups
 ## @param backup.s3AccessKey The access key for S3, used for authentication
 ## @param backup.s3SecretKey The secret key for S3, used for authentication
+## @param backup.resticPassword The password for Restic backup encryption
 backup:
 enabled: false
-retentionPolicy: 30d
-destinationPath: s3://BUCKET_NAME/
-endpointURL: http://minio-gateway-service:9000
-schedule: "0 2 * * * *"
+s3Region: us-east-1
+s3Bucket: s3.example.org/postgres-backups
+schedule: "0 2 * * *"
+cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
 s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
 s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
+resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
 
-## @section Bootstrap parameters
-
-## @param bootstrap.enabled Restore cluster from backup
-## @param bootstrap.recoveryTime Time stamp up to which recovery will proceed, expressed in RFC 3339 format, if empty, will restore latest
-## @param bootstrap.oldName Name of cluster before deleting
-##
-bootstrap:
-enabled: false
-# example: 2020-11-26 15:22:00.00000+00
-recoveryTime: ""
-oldName: ""
-
-## @param resources Explicit CPU and memory configuration for each PostgreSQL replica. When left empty, the preset defined in `resourcesPreset` is applied.
+## @param resources Resources
 resources: {}
 # resources:
+# limits:
 # cpu: 4000m
 # memory: 4Gi
+# requests:
+# cpu: 100m
+# memory: 512Mi
 
-## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-resourcesPreset: "micro"
+resourcesPreset: "nano"
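Not part of the diff — a minimal sketch of how the right-hand side's backup block would look in a user's values, with placeholder credentials (keys mirror the values.yaml hunk above):

```yaml
# Illustrative only; secrets and bucket are placeholders.
backup:
  enabled: true
  s3Region: us-east-1
  s3Bucket: s3.example.org/postgres-backups
  schedule: "0 2 * * *"    # standard five-field cron: every day at 02:00
  cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
  s3AccessKey: "<access-key>"
  s3SecretKey: "<secret-key>"
  resticPassword: "<encryption-password>"
```

Note that the default `schedule` also changes from a six-field to a standard five-field cron expression.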
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.0
+version: 0.7.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -2,4 +2,3 @@ include ../../../scripts/package.mk
 
 generate:
 readme-generator -v values.yaml -s values.schema.json -r README.md
-yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
@@ -23,35 +23,8 @@ The service utilizes official RabbitMQ operator. This ensures the reliability an
 ### Configuration parameters
 
 | Name | Description | Value |
-| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------ |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ |
 | `users` | Users configuration | `{}` |
 | `vhosts` | Virtual Hosts configuration | `{}` |
-| `resources` | Explicit CPU and memory configuration for each RabbitMQ replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
-| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
+| `resources` | Resources | `{}` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
-
-## Parameter examples and reference
-
-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
-```yaml
-resources:
-cpu: 4000m
-memory: 4Gi
-```
-
-`resourcesPreset` sets named CPU and memory configurations for each replica.
-This setting is ignored if the corresponding `resources` value is set.
-
-| Preset name | CPU | memory |
-|-------------|--------|---------|
-| `nano` | `100m` | `128Mi` |
-| `micro` | `250m` | `256Mi` |
-| `small` | `500m` | `512Mi` |
-| `medium` | `500m` | `1Gi` |
-| `large` | `1` | `2Gi` |
-| `xlarge` | `2` | `4Gi` |
-| `2xlarge` | `4` | `8Gi` |
-
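For reference (not part of this diff), the `resources`/`resourcesPreset` pair kept in the table above is typically used as in the example the hunk removes from the README; explicit `resources` take precedence over the preset:

```yaml
# Illustrative values: when resources is non-empty, resourcesPreset is ignored.
resourcesPreset: "small"   # named sizing, used only while resources stays empty
resources:
  cpu: 4000m
  memory: 4Gi
```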
@@ -11,7 +11,11 @@ spec:
 service:
 type: LoadBalancer
 {{- end }}
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}
+{{- if .Values.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
+{{- else if ne .Values.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
+{{- end }}
 override:
 statefulSet:
 spec:
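The hunk above swaps the single `cozy-lib.resources.defaultingSanitize` call for an explicit if/else over the two values. Not part of the diff — a sketch of which branch each combination of values selects:

```yaml
# Illustrative values for the branch above (one case active at a time).
# Case 1: explicit resources -> "cozy-lib.resources.sanitize" renders them.
resources:
  cpu: 1000m
  memory: 1Gi
# Case 2: empty resources plus a named preset -> "cozy-lib.resources.preset" is used.
# resources: {}
# resourcesPreset: "nano"
# Case 3: empty resources and resourcesPreset "none" -> no resources block is emitted.
# resources: {}
# resourcesPreset: "none"
```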
@@ -29,23 +29,13 @@
 },
 "resources": {
 "type": "object",
-"description": "Explicit CPU and memory configuration for each RabbitMQ replica. When left empty, the preset defined in `resourcesPreset` is applied.",
+"description": "Resources",
 "default": {}
 },
 "resourcesPreset": {
 "type": "string",
-"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
-"default": "nano",
-"enum": [
-"none",
-"nano",
-"micro",
-"small",
-"medium",
-"large",
-"xlarge",
-"2xlarge"
-]
+"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+"default": "nano"
 }
 }
 }
@@ -40,11 +40,15 @@ users: {}
 ## - user3
 vhosts: {}
 
-## @param resources Explicit CPU and memory configuration for each RabbitMQ replica. When left empty, the preset defined in `resourcesPreset` is applied.
+## @param resources Resources
 resources: {}
 # resources:
+# limits:
 # cpu: 4000m
 # memory: 4Gi
+# requests:
+# cpu: 100m
+# memory: 512Mi
 
-## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
 resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.0
+version: 0.8.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -2,4 +2,3 @@ include ../../../scripts/package.mk
 
 generate:
 readme-generator -v values.yaml -s values.schema.json -r README.md
-yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
@@ -14,37 +14,13 @@ Service utilizes the Spotahome Redis Operator for efficient management and orche
 ### Common parameters
 
 | Name | Description | Value |
-| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
 | `external` | Enable external access from outside the cluster | `false` |
 | `size` | Persistent Volume size | `1Gi` |
 | `replicas` | Number of Redis replicas | `2` |
 | `storageClass` | StorageClass used to store the data | `""` |
 | `authEnabled` | Enable password generation | `true` |
-| `resources` | Explicit CPU and memory configuration for each Redis replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
-| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
+| `resources` | Resources | `{}` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
 
-## Parameter examples and reference
-
-### resources and resourcesPreset
-
-`resources` sets explicit CPU and memory configurations for each replica.
-When left empty, the preset defined in `resourcesPreset` is applied.
-
-```yaml
-resources:
-cpu: 4000m
-memory: 4Gi
-```
-
-`resourcesPreset` sets named CPU and memory configurations for each replica.
-This setting is ignored if the corresponding `resources` value is set.
-
-| Preset name | CPU | memory |
-|-------------|--------|---------|
-| `nano` | `250m` | `128Mi` |
-| `micro` | `500m` | `256Mi` |
-| `small` | `1` | `512Mi` |
-| `medium` | `1` | `1Gi` |
-| `large` | `3` | `2Gi` |
-| `xlarge` | `4` | `4Gi` |
-| `2xlarge` | `8` | `8Gi` |
@@ -25,10 +25,18 @@ metadata:
 spec:
 sentinel:
 replicas: 3
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 6 }}
+{{- if .Values.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 6 }}
+{{- else if ne .Values.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 6 }}
+{{- end }}
 redis:
-resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 6 }}
 replicas: {{ .Values.replicas }}
+{{- if .Values.resources }}
+resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 6 }}
+{{- else if ne .Values.resourcesPreset "none" }}
+resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 6 }}
+{{- end }}
 {{- with .Values.size }}
 storage:
 persistentVolumeClaim:
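One detail worth noting in the hunk above: the same `resources`/`resourcesPreset` values size both the `sentinel` and `redis` specs. Not part of the diff — an illustrative values snippet:

```yaml
# Illustrative only: a single sizing setting applies to both specs rendered above.
replicas: 2               # documented default for Redis replicas
resources: {}             # left empty, so the preset below is used
resourcesPreset: "micro"
```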
Some files were not shown because too many files have changed in this diff.