Mirror of https://github.com/outbackdingo/cozystack.git, synced 2026-01-28 18:18:41 +00:00

Compare commits: 519-cross-... → v0.33.0 (215 commits)
Commits in this comparison (215), newest first:

70f8266767, a9674d2ae7, cb6a55bc4a, 3ecbaf23a4, 946fad8bb8, f1d86e5045, 9adcd48c44, fb82bfae11, bd9e283d3b, d2126b6703,
73fe621da1, 0b7bbb1ba9, bb46aa4b7d, 6256e40169, 22cda073b9, 0d46393e8c, 193f43d7bb, 8ec882ca5f, c596805b60, f891d0bee6,
1f748d563f, 210f3c7b6b, 433bfe7b6c, fa6442998a, 6d06d3b1fb, 4c347cc026, 986de717f1, d38c8aa5ab, 7f9f850b47, ca772fae2e,
fb831c05c0, f7f8020b9b, 98194a7414, 70c7978306, d5521df9bd, 6ed1243f86, d1275ecd08, 6c9d8bb47f, 1f240387f9, 1d3964352e,
512277fa93, cd7fec68fc, d12d07fd5c, 00bd212886, d19d6b58d0, f953db50da, 55e11fcc7b, 12184bc2b9, 39daa3a38a, a5ff9bf65b,
036fa6f888, 792f6b4af8, 52714f5cce, bc54bd7bb0, 0b85a52bee, b3a2bc85e3, d097433266, 2d294f0546, 78b4d06b25, ae90969b7e,
6732205b24, 60dee45a61, 70cd3ce3e7, 9dc21c6c2d, 4648c7b4c1, 6a080fbf5d, 72f40f32ad, cfc8c269f3, 1da45ff039, c6ee006d6b,
848abc4bd1, 4369b03141, baefc78bfe, 4f11814551, 307b5617f0, 7cf0ce1abf, 5602e9753f, ab20502b37, 8369fcddbf, 9f9ca50dd9,
e7681debe2, 36b10341ca, 0c234e400b, c0b7f4e938, 654778a0c7, 86fdb51236, e8b83fbbda, 29f26f4dd0, a0526be17d, 4e41c133b4,
587904e8cc, 6358fd7a45, af595f34dc, 2832058036, b9d3b43c3e, bd0bc64c2a, 2dd62f052e, 778577e0d5, 8568b9925f, 46ad1b1cd8,
066ed77918, c7be1a5572, 439e927f6b, c354d5adc6, 5ffe11dfc6, 37a8bfaa06, 0b03768482, 620d626887, 4e2a081c8b, fa09845ef9,
a2a79cb5d9, 7f7cb019e6, ba74f397f5, 7c45335abb, ae13b58d5f, 3c7f7d1127, f0fc3238ca, b3380d8365, d97d6cb81d, b2a697f98d,
6e6a05d11e, 5d76294ff0, 62a6da0063, 6a8530a00a, b3b40dcf9c, 4479ed5e95, b16e73ad42, 4631f85114, 746641e523, e848dde422,
3ce6dbe850, 8d5007919f, 08e569918b, 6498000721, 8486e6b3aa, 3f6b6798f4, c1b928b8ef, c2e8fba483, 62cb694d72, c619343aa2,
75ad26989d, c4fc8c18df, 8663dc940f, cf983a8f9c, ad6aa0ca94, 9dc5d62f47, 3b8a9f9d2c, ab9926a177, f83741eb09, 028f2e4e8d,
255fa8cbe1, b42f5cdc01, 74633ad699, 980185ca2b, 8eabe30548, 0c9c688e6d, 908c75927e, 0a1f078384, 6a713e5eb4, 8f0a28bad5,
0fa70d9d38, b14c82d606, 8e79f24c5b, 3266a5514e, 0c37323a15, 10af98e158, 632224a30a, e8d11e64a6, 27c7a2feb5, 9555386bd7,
9733de38a3, 775a05cc3a, 4e5cc2ae61, 32adf5ab38, 28302e776e, 911ca64de0, 045ea76539, cee820e82c, 6183b715b7, 2669ab6072,
96506c7cce, 7bb70c839e, ba97a4593c, c467ed798a, ed881f0741, 0e0dabdd08, bd8f8bde95, 646dab497c, dc3b61d164, 4479a038cd,
dfd01ff118, d2bb66db31, 7af97e2d9f, ac5145be87, 4779db2dda, 25c2774bc8, bbee8103eb, 730ea4d5ef, 13fccdc465, f1b66c80f6,
f34f140d49, 520fbfb2e4, 25016580c1, f10f8455fc, 974581d39b, 7e24297913, b6142cd4f5, e87994c769, b140f1b57f, 64936021d2,
a887e19e6c, 92b97a569e, 0e22358b30, 7429daf99c, fc8b52d73d
.github/PULL_REQUEST_TEMPLATE.md | 24 (new file, vendored)

````diff
@@ -0,0 +1,24 @@
+<!-- Thank you for making a contribution! Here are some tips for you:
+- Start the PR title with the [label] of the Cozystack component:
+  - For system components: [platform], [system], [linstor], [cilium], [kube-ovn], [dashboard], [cluster-api], etc.
+  - For managed apps: [apps], [tenant], [kubernetes], [postgres], [virtual-machine], etc.
+  - For development and maintenance: [tests], [ci], [docs], [maintenance].
+- If it's a work in progress, consider creating this PR as a draft.
+- Don't hesitate to ask for opinions and reviews in the community chats, even if it's still a draft.
+- Add the label `backport` if it's a bugfix that needs to be backported to a previous version.
+-->
+
+## What this PR does
+
+
+### Release note
+
+<!-- Write a release note:
+- Explain what has changed internally and for users.
+- Start with the same [label] as in the PR title.
+- Follow the guidelines at https://github.com/kubernetes/community/blob/master/contributors/guide/release-notes.md.
+-->
+
+```release-note
+[]
+```
````
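For illustration only, a filled-in release-note block for a hypothetical bugfix PR might read:

```release-note
[apps] Fix ClickHouse version parsing in the managed application chart.
```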
.github/workflows/pre-commit.yml | 9 (vendored)

```diff
@@ -2,7 +2,7 @@ name: Pre-Commit Checks
 
 on:
   pull_request:
-    types: [labeled, opened, synchronize, reopened]
+    types: [opened, synchronize, reopened]
 
 concurrency:
   group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
@@ -30,12 +30,13 @@ jobs:
         run: |
           sudo apt update
           sudo apt install curl -y
-          curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
-          sudo apt install nodejs -y
-          git clone https://github.com/bitnami/readme-generator-for-helm
+          sudo apt install npm -y
+
+          git clone --branch 2.7.0 --depth 1 https://github.com/bitnami/readme-generator-for-helm.git
           cd ./readme-generator-for-helm
           npm install
-          npm install -g pkg
+          npm install -g @yao-pkg/pkg
          pkg . -o /usr/local/bin/readme-generator
 
       - name: Run pre-commit hooks
```
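The readme-generator binary built here regenerates Helm chart READMEs from values files during the pre-commit checks. An illustrative invocation (the chart path is an assumption) would be:

```sh
# Regenerate a chart README from its values.yaml (illustrative chart path).
readme-generator --values packages/apps/mysql/values.yaml --readme packages/apps/mysql/README.md
```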
.github/workflows/pull-requests-release.yaml | 95 (vendored)

```diff
@@ -1,100 +1,17 @@
-name: Releasing PR
+name: "Releasing PR"
 
 on:
   pull_request:
-    types: [labeled, opened, synchronize, reopened, closed]
+    types: [closed]
     paths-ignore:
       - 'docs/**/*'
 
 # Cancel in‑flight runs for the same PR when a new push arrives.
 concurrency:
-  group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true
 
 jobs:
-  verify:
-    name: Test Release
-    runs-on: [self-hosted]
-    permissions:
-      contents: read
-      packages: write
-
-    if: |
-      contains(github.event.pull_request.labels.*.name, 'release') &&
-      github.event.action != 'closed'
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          fetch-tags: true
-
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
-        with:
-          username: ${{ github.repository_owner }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-          registry: ghcr.io
-
-      - name: Extract tag from PR branch
-        id: get_tag
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const branch = context.payload.pull_request.head.ref;
-            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
-            if (!m) {
-              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
-              return;
-            }
-            const tag = `v${m[1]}`;
-            core.setOutput('tag', tag);
-
-      - name: Find draft release and get asset IDs
-        id: fetch_assets
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GH_PAT }}
-          script: |
-            const tag = '${{ steps.get_tag.outputs.tag }}';
-            const releases = await github.rest.repos.listReleases({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              per_page: 100
-            });
-            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
-            if (!draft) {
-              core.setFailed(`Draft release '${tag}' not found`);
-              return;
-            }
-            const findAssetId = (name) =>
-              draft.assets.find(a => a.name === name)?.id;
-            const installerId = findAssetId("cozystack-installer.yaml");
-            const diskId = findAssetId("nocloud-amd64.raw.xz");
-            if (!installerId || !diskId) {
-              core.setFailed("Missing required assets");
-              return;
-            }
-            core.setOutput("installer_id", installerId);
-            core.setOutput("disk_id", diskId);
-
-      - name: Download assets from GitHub API
-        run: |
-          mkdir -p _out/assets
-          curl -sSL \
-            -H "Authorization: token ${GH_PAT}" \
-            -H "Accept: application/octet-stream" \
-            -o _out/assets/cozystack-installer.yaml \
-            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.installer_id }}"
-          curl -sSL \
-            -H "Authorization: token ${GH_PAT}" \
-            -H "Accept: application/octet-stream" \
-            -o _out/assets/nocloud-amd64.raw.xz \
-            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.disk_id }}"
-        env:
-          GH_PAT: ${{ secrets.GH_PAT }}
-
-      - name: Run tests
-        run: make test
-
   finalize:
     name: Finalize Release
     runs-on: [self-hosted]
```
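Both workflows derive the release tag from the PR's head branch with the regex shown above. A rough shell equivalent of that mapping, with an illustrative branch name, is:

```sh
# Sketch of the branch-to-tag rule from the get_tag step (branch name is made up).
branch="release-0.33.0-rc.1"
tag=$(printf '%s\n' "$branch" | sed -nE 's/^release-([0-9]+\.[0-9]+\.[0-9]+([-._[:alnum:]]+)?)$/v\1/p')
[ -n "$tag" ] && echo "tag: $tag" || echo "no match: expected release-X.Y.Z[-suffix]"
```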
.github/workflows/pull-requests.yaml | 305 (vendored)

```diff
@@ -2,10 +2,13 @@ name: Pull Request
 
 on:
   pull_request:
-    types: [labeled, opened, synchronize, reopened]
+    types: [opened, synchronize, reopened]
+    paths-ignore:
+      - 'docs/**/*'
 
+# Cancel in‑flight runs for the same PR when a new push arrives.
 concurrency:
-  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true
 
 jobs:
```
```diff
@@ -43,6 +46,17 @@ jobs:
 
       - name: Build Talos image
         run: make -C packages/core/installer talos-nocloud
 
+      - name: Save git diff as patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        run: git diff HEAD > _out/assets/pr.patch
+
+      - name: Upload git diff patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        uses: actions/upload-artifact@v4
+        with:
+          name: pr-patch
+          path: _out/assets/pr.patch
+
       - name: Upload installer
         uses: actions/upload-artifact@v4
```
```diff
@@ -55,28 +69,283 @@
         with:
           name: talos-image
           path: _out/assets/nocloud-amd64.raw.xz
 
-  test:
-    name: Test
-    runs-on: [self-hosted]
-    needs: build
-
-    # Never run when the PR carries the "release" label.
-    if: |
-      !contains(github.event.pull_request.labels.*.name, 'release')
+  resolve_assets:
+    name: "Resolve assets"
+    runs-on: ubuntu-latest
+    if: contains(github.event.pull_request.labels.*.name, 'release')
+    outputs:
+      installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
+      disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
 
     steps:
-      - name: Download installer
-        uses: actions/download-artifact@v4
+      - name: Checkout code
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        uses: actions/checkout@v4
         with:
-          name: cozystack-installer
-          path: _out/assets/
+          fetch-depth: 0
+          fetch-tags: true
 
-      - name: Download Talos image
+      - name: Extract tag from PR branch (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        id: get_tag
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const branch = context.payload.pull_request.head.ref;
+            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
+            if (!m) {
+              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
+              return;
+            }
+            core.setOutput('tag', `v${m[1]}`);
+
+      - name: Find draft release & asset IDs (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        id: fetch_assets
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GH_PAT }}
+          script: |
+            const tag = '${{ steps.get_tag.outputs.tag }}';
+            const releases = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              per_page: 100
+            });
+            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
+            if (!draft) {
+              core.setFailed(`Draft release '${tag}' not found`);
+              return;
+            }
+            const find = (n) => draft.assets.find(a => a.name === n)?.id;
+            const installerId = find('cozystack-installer.yaml');
+            const diskId = find('nocloud-amd64.raw.xz');
+            if (!installerId || !diskId) {
+              core.setFailed('Required assets missing in draft release');
+              return;
+            }
+            core.setOutput('installer_id', installerId);
+            core.setOutput('disk_id', diskId);
+
+
+  prepare_env:
+    name: "Prepare environment"
+    runs-on: [self-hosted]
+    permissions:
+      contents: read
+      packages: read
+    needs: ["build", "resolve_assets"]
+    if: ${{ always() && (needs.build.result == 'success' || needs.resolve_assets.result == 'success') }}
+
+    steps:
+      # ▸ Checkout and prepare the codebase
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      # ▸ Regular PR path – download artefacts produced by the *build* job
+      - name: "Download Talos image (regular PR)"
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
         uses: actions/download-artifact@v4
         with:
           name: talos-image
-          path: _out/assets/
+          path: _out/assets
+
+      - name: Download PR patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        uses: actions/download-artifact@v4
+        with:
+          name: pr-patch
+          path: _out/assets
+
+      - name: Apply patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        run: |
+          git apply _out/assets/pr.patch
+
+      # ▸ Release PR path – fetch artefacts from the corresponding draft release
+      - name: Download assets from draft release (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        run: |
+          mkdir -p _out/assets
+          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
+            -o _out/assets/nocloud-amd64.raw.xz \
+            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      # ▸ Start actual job steps
+      - name: Prepare workspace
+        run: |
+          rm -rf /tmp/$SANDBOX_NAME
+          cp -r ${{ github.workspace }} /tmp/$SANDBOX_NAME
+
+      - name: Prepare environment
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          attempt=0
+          until make SANDBOX_NAME=$SANDBOX_NAME prepare-env; do
+            attempt=$((attempt + 1))
+            if [ $attempt -ge 3 ]; then
+              echo "❌ Attempt $attempt failed, exiting..."
+              exit 1
+            fi
+            echo "❌ Attempt $attempt failed, retrying..."
+          done
+          echo "✅ The task completed successfully after $attempt attempts"
+
+  install_cozystack:
+    name: "Install Cozystack"
+    runs-on: [self-hosted]
+    permissions:
+      contents: read
+      packages: read
+    needs: ["prepare_env", "resolve_assets"]
+    if: ${{ always() && needs.prepare_env.result == 'success' }}
+
+    steps:
+      - name: Prepare _out/assets directory
+        run: mkdir -p _out/assets
+
+      # ▸ Regular PR path – download artefacts produced by the *build* job
+      - name: "Download installer (regular PR)"
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        uses: actions/download-artifact@v4
+        with:
+          name: cozystack-installer
+          path: _out/assets
+
+      # ▸ Release PR path – fetch artefacts from the corresponding draft release
+      - name: Download assets from draft release (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        run: |
+          mkdir -p _out/assets
+          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
+            -o _out/assets/cozystack-installer.yaml \
+            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
+
+      # ▸ Start actual job steps
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Sync _out/assets directory
+        run: |
+          mkdir -p /tmp/$SANDBOX_NAME/_out/assets
+          mv _out/assets/* /tmp/$SANDBOX_NAME/_out/assets/
+
+      - name: Install Cozystack into sandbox
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          attempt=0
+          until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack; do
+            attempt=$((attempt + 1))
+            if [ $attempt -ge 3 ]; then
+              echo "❌ Attempt $attempt failed, exiting..."
+              exit 1
+            fi
+            echo "❌ Attempt $attempt failed, retrying..."
+          done
+          echo "✅ The task completed successfully after $attempt attempts."
+
+  detect_test_matrix:
+    name: "Detect e2e test matrix"
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set.outputs.matrix }}
+
+    steps:
+      - uses: actions/checkout@v4
+      - id: set
+        run: |
+          apps=$(find hack/e2e-apps -maxdepth 1 -mindepth 1 -name '*.bats' | \
+            awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
+          echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
```
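The `detect_test_matrix` job above turns the `hack/e2e-apps/*.bats` filenames into a JSON matrix. Using the test files added in this changeset, the same pipeline can be reproduced locally:

```sh
# Reproduce the matrix derivation (file list taken from this changeset).
printf '%s\n' hack/e2e-apps/clickhouse.bats hack/e2e-apps/kafka.bats \
  hack/e2e-apps/kubernetes.bats hack/e2e-apps/mysql.bats hack/e2e-apps/postgres.bats |
  awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .
# prints: ["clickhouse","kafka","kubernetes","mysql","postgres"]
```

The diff continues with the jobs that consume this matrix: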
```diff
+  test_apps:
+    strategy:
+      matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
+    name: Test ${{ matrix.app }}
+    runs-on: [self-hosted]
+    needs: [install_cozystack,detect_test_matrix]
+    if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}
+
+    steps:
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: E2E Apps
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          attempt=0
+          until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}; do
+            attempt=$((attempt + 1))
+            if [ $attempt -ge 3 ]; then
+              echo "❌ Attempt $attempt failed, exiting..."
+              exit 1
+            fi
+            echo "❌ Attempt $attempt failed, retrying..."
+          done
+          echo "✅ The task completed successfully after $attempt attempts"
+
+  collect_debug_information:
+    name: Collect debug information
+    runs-on: [self-hosted]
+    needs: [test_apps]
+    if: ${{ always() }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Collect report
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report
+
+      - name: Upload cozyreport.tgz
+        uses: actions/upload-artifact@v4
+        with:
+          name: cozyreport
+          path: /tmp/${{ env.SANDBOX_NAME }}/_out/cozyreport.tgz
+
+      - name: Collect images list
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images
+
+      - name: Upload image list
+        uses: actions/upload-artifact@v4
+        with:
+          name: image-list
+          path: /tmp/${{ env.SANDBOX_NAME }}/_out/images.txt
+
+  cleanup:
+    name: Tear down environment
+    runs-on: [self-hosted]
+    needs: [collect_debug_information]
+    if: ${{ always() && needs.test_apps.result == 'success' }}
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Tear down sandbox
+        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete
+
+      - name: Remove workspace
+        run: rm -rf /tmp/$SANDBOX_NAME
+
-
-      - name: Test
-        run: make test
```
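Each job recomputes the same `SANDBOX_NAME` so that every stage addresses one sandbox. The derivation can be checked in isolation (the environment values below are illustrative):

```sh
# Derive the per-workflow sandbox name the way the "Set sandbox ID" steps do.
GITHUB_REPOSITORY=cozystack/cozystack   # illustrative values
GITHUB_WORKFLOW="Pull Request"
GITHUB_REF=refs/pull/1234/merge
echo "cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)"
```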
.github/workflows/tags.yaml | 13 (vendored)

```diff
@@ -112,9 +112,12 @@ jobs:
       # Commit built artifacts
       - name: Commit release artifacts
         if: steps.check_release.outputs.skip == 'false'
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
         run: |
-          git config user.name "github-actions"
-          git config user.email "github-actions@github.com"
+          git config user.name "cozystack-bot"
+          git config user.email "217169706+cozystack-bot@users.noreply.github.com"
+          git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
           git add .
           git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
           git push origin HEAD || true
@@ -189,7 +192,12 @@ jobs:
       # Create release-X.Y.Z branch and push (force-update)
       - name: Create release branch
         if: steps.check_release.outputs.skip == 'false'
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
         run: |
+          git config user.name "cozystack-bot"
+          git config user.email "217169706+cozystack-bot@users.noreply.github.com"
+          git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
           BRANCH="release-${GITHUB_REF#refs/tags/v}"
           git branch -f "$BRANCH"
           git push -f origin "$BRANCH"
@@ -199,6 +207,7 @@ jobs:
         if: steps.check_release.outputs.skip == 'false'
         uses: actions/github-script@v7
         with:
+          github-token: ${{ secrets.GH_PAT }}
           script: |
             const version = context.ref.replace('refs/tags/v', '');
             const base = '${{ steps.get_base.outputs.branch }}';
```
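The `${GITHUB_REF#refs/tags/v}` expansion in the branch-creation step strips the tag prefix; for example:

```sh
# Shell prefix stripping as used in "Create release branch".
GITHUB_REF=refs/tags/v0.33.0
BRANCH="release-${GITHUB_REF#refs/tags/v}"
echo "$BRANCH"   # prints: release-0.33.0
```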
Makefile | 5

```diff
@@ -9,7 +9,6 @@ build-deps:
 
 build: build-deps
 	make -C packages/apps/http-cache image
-	make -C packages/apps/postgres image
 	make -C packages/apps/mysql image
 	make -C packages/apps/clickhouse image
 	make -C packages/apps/kubernetes image
@@ -49,6 +48,10 @@ test:
 	make -C packages/core/testing apply
 	make -C packages/core/testing test
 
+prepare-env:
+	make -C packages/core/testing apply
+	make -C packages/core/testing prepare-cluster
+
 generate:
 	hack/update-codegen.sh
```
README.md | 12

```diff
@@ -12,11 +12,15 @@
 
 **Cozystack** is a free PaaS platform and framework for building clouds.
 
+Cozystack is a [CNCF Sandbox Level Project](https://www.cncf.io/sandbox-projects/) that was originally built and sponsored by [Ænix](https://aenix.io/).
+
 With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
 Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
 
 Use Cozystack to build your own cloud or provide a cost-effective development environment.
 
+
+
 ## Use-Cases
 
 * [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
@@ -28,9 +32,6 @@ You can use Cozystack as a platform to build a private cloud powered by Infrastr
 * [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
   You can use Cozystack as a Kubernetes distribution for Bare Metal
 
-## Screenshot
-
-
-
 ## Documentation
 
 
@@ -59,7 +60,10 @@ Commits are used to generate the changelog, and their author will be referenced
 
 If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
 
-You are welcome to join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
+## Community
+
+You are welcome to join our [Telegram group](https://t.me/cozystack) and come to our weekly community meetings.
+Add them to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics) for convenience.
 
 ## License
 
```
```diff
@@ -194,7 +194,15 @@ func main() {
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "Workload")
+		setupLog.Error(err, "unable to create controller", "controller", "TenantHelmReconciler")
+		os.Exit(1)
+	}
+
+	if err = (&controller.CozystackConfigReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
 		os.Exit(1)
 	}
 
```
docs/changelogs/v0.32.0.md | 71 (new file)

```diff
@@ -0,0 +1,71 @@
+Cozystack v0.32.0 is a significant release that brings new features, key fixes, and updates to underlying components.
+
+## Major Features and Improvements
+
+* [platform] Use `cozypkg` instead of Helm. (@kvaps in https://github.com/cozystack/cozystack/pull/1057)
+* [platform] Introduce the HelmRelease reconciler for system components. (@kvaps in https://github.com/cozystack/cozystack/pull/1033)
+* [kubernetes] Enable using container registry mirrors by tenant Kubernetes clusters. Configure containerd for tenant Kubernetes clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/979, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/1032)
+* [platform] Allow users to specify CPU requests in VCPUs. Use a library chart for resource management. (@lllamnyp in https://github.com/cozystack/cozystack/pull/972 and https://github.com/cozystack/cozystack/pull/1025)
+* [platform] Annotate all child objects of apps with uniform labels for tracking by WorkloadMonitors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1018 and https://github.com/cozystack/cozystack/pull/1024)
+* [platform] Introduce the `cluster-domain` option and un-hardcode `cozy.local`. (@kvaps in https://github.com/cozystack/cozystack/pull/1039)
+* [platform] Get the instance type when reconciling a WorkloadMonitor. (https://github.com/cozystack/cozystack/pull/1030)
+* [virtual-machine] Add RBAC rules to allow port forwarding in KubeVirt for SSH via `virtctl`. (@mattia-eleuteri in https://github.com/cozystack/cozystack/pull/1027, patched by @klinch0 in https://github.com/cozystack/cozystack/pull/1028)
+* [monitoring] Add events and audit inputs. (@kevin880202 in https://github.com/cozystack/cozystack/pull/948)
+
+## Security
+
+* Resolve a security problem that allowed a tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062)
+
+## Fixes
+
+* [dashboard] Fix a number of issues in the Cozystack Dashboard. (@kvaps in https://github.com/cozystack/cozystack/pull/1042)
+* [kafka] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040)
+* [cilium] Fix the Gateway API manifest. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/1016)
+* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031)
+* [platform] Fix dependencies for the paas-hosted bundle. (@kvaps in https://github.com/cozystack/cozystack/pull/1034)
+* [platform] Reduce system resource consumption by using lesser resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054)
+* [virtual-machine] Fix handling of cloudinit and ssh-key input for `virtual-machine` and `vm-instance` applications. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1019 and https://github.com/cozystack/cozystack/pull/1020)
+* [apps] Fix ClickHouse version parsing. (@kvaps in https://github.com/cozystack/cozystack/commit/28302e776e9d2bb8f424cf467619fa61d71ac49a)
+* [apps] Add resource quotas for PostgreSQL jobs and fix the application readme generation check in CI. (@klinch0 in https://github.com/cozystack/cozystack/pull/1051)
+* [kube-ovn] Enable the database health check. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
+* [kubernetes] Fix an upstream issue by updating KubeVirt-CCM. (@kvaps in https://github.com/cozystack/cozystack/pull/1052)
+* [kubernetes] Fix resources and introduce a migration when upgrading tenant Kubernetes to v0.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1073)
+* [cluster-api] Add a missing migration for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1072)
+
+## Dependencies
+
+* Introduce cozypkg, update to v1.1.0. (@kvaps in https://github.com/cozystack/cozystack/pull/1057 and https://github.com/cozystack/cozystack/pull/1063)
+* Update flux-operator to 0.22.0, Flux to 2.6.x. (@kingdonb in https://github.com/cozystack/cozystack/pull/1035)
+* Update Talos Linux to v1.10.3. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
+* Update Cilium to v1.17.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1046)
+* Update MetalLB to v0.15.2. (@kvaps in https://github.com/cozystack/cozystack/pull/1045)
+* Update Kube-OVN to v1.13.13. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
+
+## Documentation
+
+* [Oracle Cloud Infrastructure installation guide](https://cozystack.io/docs/operations/talos/installation/oracle-cloud/). (@kvaps, @lllamnyp, and @NickVolynkin in https://github.com/cozystack/website/pull/168)
+* [Cluster configuration with `talosctl`](https://cozystack.io/docs/operations/talos/configuration/talosctl/). (@NickVolynkin in https://github.com/cozystack/website/pull/211)
+* [Configuring container registry mirrors for tenant Kubernetes clusters](https://cozystack.io/docs/operations/talos/configuration/air-gapped/#5-configure-container-registry-mirrors-for-tenant-kubernetes). (@klinch0 in https://github.com/cozystack/website/pull/210)
+* [Explain application management strategies and available versions for managed applications](https://cozystack.io/docs/guides/applications/). (@NickVolynkin in https://github.com/cozystack/website/pull/219)
+* [How to clean up etcd state](https://cozystack.io/docs/operations/faq/#how-to-clean-up-etcd-state). (@gwynbleidd2106 in https://github.com/cozystack/website/pull/214)
+* [State that Cozystack is a CNCF Sandbox project](https://github.com/cozystack/cozystack?tab=readme-ov-file#cozystack). (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1055)
+
+## Development, Testing, and CI/CD
+
+* [tests] Add tests for applications `virtual-machine`, `vm-disk`, `vm-instance`, `postgresql`, `mysql`, and `clickhouse`. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1048, patched by @kvaps in https://github.com/cozystack/cozystack/pull/1074)
+* [tests] Fix concurrency for the `docker login` action. (@kvaps in https://github.com/cozystack/cozystack/pull/1014)
+* [tests] Increase QEMU system disk size in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1011)
+* [tests] Increase the waiting timeout for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1038)
+* [ci] Separate build and testing jobs in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1005 and https://github.com/cozystack/cozystack/pull/1010)
+* [ci] Fix the release assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006 and https://github.com/cozystack/cozystack/pull/1009)
+
+## New Contributors
+
+* @kevin880202 made their first contribution in https://github.com/cozystack/cozystack/pull/948
+* @mattia-eleuteri made their first contribution in https://github.com/cozystack/cozystack/pull/1027
+
+**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.32.0
+
+<!--
+HEAD https://github.com/cozystack/cozystack/commit/3ce6dbe8
+-->
```
go.mod | 13

```diff
@@ -37,6 +37,7 @@ require (
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
@@ -91,14 +92,14 @@ require (
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.28.0 // indirect
+	golang.org/x/crypto v0.31.0 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/net v0.33.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.8.0 // indirect
-	golang.org/x/sys v0.26.0 // indirect
-	golang.org/x/term v0.25.0 // indirect
-	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/term v0.27.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.7.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
```
go.sum | 28

```diff
@@ -26,8 +26,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
-github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -212,8 +212,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -222,26 +222,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
 golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
 golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
```
hack/cdi_golden_image_create.sh | 32 (new file)

```diff
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -e
+
+name="$1"
+url="$2"
+
+if [ -z "$name" ] || [ -z "$url" ]; then
+  echo "Usage: <name> <url>"
+  echo "Example: 'ubuntu' 'https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img'"
+  exit 1
+fi
+
+#### create DV ubuntu source for CDI image cloning
+kubectl create -f - <<EOF
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+  name: "vm-image-$name"
+  namespace: cozy-public
+  annotations:
+    cdi.kubevirt.io/storage.bind.immediate.requested: "true"
+spec:
+  source:
+    http:
+      url: "$url"
+  storage:
+    resources:
+      requests:
+        storage: 5Gi
+    storageClassName: replicated
+EOF
```
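Following the script's own usage hint, creating a golden Ubuntu image looks like:

```sh
# Example invocation taken from the script's usage text.
hack/cdi_golden_image_create.sh ubuntu \
  'https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img'
```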
hack/collect-images.sh | 8 (new executable file)

```diff
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+for node in 11 12 13; do
+  talosctl -n 192.168.123.${node} -e 192.168.123.${node} images ls >> images.tmp
+  talosctl -n 192.168.123.${node} -e 192.168.123.${node} images --namespace system ls >> images.tmp
+done
+
+while read _ name sha _ ; do echo $sha $name ; done < images.tmp | sort -u > images.txt
```
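The trailing loop reorders each listing line to `<sha> <name>` and deduplicates. With a fabricated input line (real `talosctl images ls` columns may differ) the transformation is:

```sh
# Fabricated input line; demonstrates only the column swap performed above.
echo "k8s.io ghcr.io/example/image:v1 sha256:abc123 120MB" |
  while read _ name sha _; do echo "$sha $name"; done
# prints: sha256:abc123 ghcr.io/example/image:v1
```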
hack/cozyreport.sh | 147 (new executable file)

```diff
@@ -0,0 +1,147 @@
+#!/bin/sh
+REPORT_DATE=$(date +%Y-%m-%d_%H-%M-%S)
+REPORT_NAME=${1:-cozyreport-$REPORT_DATE}
+REPORT_PDIR=$(mktemp -d)
+REPORT_DIR=$REPORT_PDIR/$REPORT_NAME
+
+# -- check dependencies
+command -V kubectl >/dev/null || exit $?
+command -V tar >/dev/null || exit $?
+
+# -- cozystack module
+
+echo "Collecting Cozystack information..."
+mkdir -p $REPORT_DIR/cozystack
+kubectl get deploy -n cozy-system cozystack -o jsonpath='{.spec.template.spec.containers[0].image}' > $REPORT_DIR/cozystack/image.txt 2>&1
+kubectl get cm -n cozy-system --no-headers | awk '$1 ~ /^cozystack/' |
+while read NAME _; do
+  DIR=$REPORT_DIR/cozystack/configs
+  mkdir -p $DIR
+  kubectl get cm -n cozy-system $NAME -o yaml > $DIR/$NAME.yaml 2>&1
+done
+
+# -- kubernetes module
+
+echo "Collecting Kubernetes information..."
+mkdir -p $REPORT_DIR/kubernetes
+kubectl version > $REPORT_DIR/kubernetes/version.txt 2>&1
+
+echo "Collecting nodes..."
+kubectl get nodes -o wide > $REPORT_DIR/kubernetes/nodes.txt 2>&1
+kubectl get nodes --no-headers | awk '$2 != "Ready"' |
+while read NAME _; do
+  DIR=$REPORT_DIR/kubernetes/nodes/$NAME
+  mkdir -p $DIR
+  kubectl get node $NAME -o yaml > $DIR/node.yaml 2>&1
+  kubectl describe node $NAME > $DIR/describe.txt 2>&1
+done
+
+echo "Collecting namespaces..."
+kubectl get ns -o wide > $REPORT_DIR/kubernetes/namespaces.txt 2>&1
+kubectl get ns --no-headers | awk '$2 != "Active"' |
+while read NAME _; do
+  DIR=$REPORT_DIR/kubernetes/namespaces/$NAME
+  mkdir -p $DIR
+  kubectl get ns $NAME -o yaml > $DIR/namespace.yaml 2>&1
+  kubectl describe ns $NAME > $DIR/describe.txt 2>&1
+done
+
+echo "Collecting helmreleases..."
+kubectl get hr -A > $REPORT_DIR/kubernetes/helmreleases.txt 2>&1
+kubectl get hr -A | awk '$4 != "True"' | \
+while read NAMESPACE NAME _; do
+  DIR=$REPORT_DIR/kubernetes/helmreleases/$NAMESPACE/$NAME
+  mkdir -p $DIR
+  kubectl get hr -n $NAMESPACE $NAME -o yaml > $DIR/hr.yaml 2>&1
+  kubectl describe hr -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
+done
+
+echo "Collecting pods..."
+kubectl get pod -A -o wide > $REPORT_DIR/kubernetes/pods.txt 2>&1
+kubectl get pod -A --no-headers | awk '$4 !~ /Running|Succeeded|Completed/' |
+while read NAMESPACE NAME _ STATE _; do
+  DIR=$REPORT_DIR/kubernetes/pods/$NAMESPACE/$NAME
+  mkdir -p $DIR
+  CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name}' -n $NAMESPACE $NAME)
+  kubectl get pod -n $NAMESPACE $NAME -o yaml > $DIR/pod.yaml 2>&1
+  kubectl describe pod -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
+  if [ "$STATE" != "Pending" ]; then
+    for CONTAINER in $CONTAINERS; do
+      kubectl logs -n $NAMESPACE $NAME $CONTAINER > $DIR/logs-$CONTAINER.txt 2>&1
+      kubectl logs -n $NAMESPACE $NAME $CONTAINER --previous > $DIR/logs-$CONTAINER-previous.txt 2>&1
+    done
+  fi
+done
+
+echo "Collecting virtualmachines..."
+kubectl get vm -A > $REPORT_DIR/kubernetes/vms.txt 2>&1
+kubectl get vm -A --no-headers | awk '$5 != "True"' |
+while read NAMESPACE NAME _; do
+  DIR=$REPORT_DIR/kubernetes/vm/$NAMESPACE/$NAME
+  mkdir -p $DIR
+  kubectl get vm -n $NAMESPACE $NAME -o yaml > $DIR/vm.yaml 2>&1
+  kubectl describe vm -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
+done
+
+echo "Collecting virtualmachine instances..."
+kubectl get vmi -A > $REPORT_DIR/kubernetes/vmis.txt 2>&1
+kubectl get vmi -A --no-headers | awk '$4 != "Running"' |
+while read NAMESPACE NAME _; do
+  DIR=$REPORT_DIR/kubernetes/vmi/$NAMESPACE/$NAME
+  mkdir -p $DIR
+  kubectl get vmi -n $NAMESPACE $NAME -o yaml > $DIR/vmi.yaml 2>&1
+  kubectl describe vmi -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
+done
+
+echo "Collecting services..."
+kubectl get svc -A > $REPORT_DIR/kubernetes/services.txt 2>&1
+kubectl get svc -A --no-headers | awk '$4 == "<pending>"' |
+while read NAMESPACE NAME _; do
+  DIR=$REPORT_DIR/kubernetes/services/$NAMESPACE/$NAME
+  mkdir -p $DIR
+  kubectl get svc -n $NAMESPACE $NAME -o yaml > $DIR/service.yaml 2>&1
+  kubectl describe svc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
+done
+
+echo "Collecting pvcs..."
+kubectl get pvc -A > $REPORT_DIR/kubernetes/pvcs.txt 2>&1
+kubectl get pvc -A | awk '$3 != "Bound"' |
+while read NAMESPACE NAME _; do
+  DIR=$REPORT_DIR/kubernetes/pvc/$NAMESPACE/$NAME
+  mkdir -p $DIR
+  kubectl get pvc -n $NAMESPACE $NAME -o yaml > $DIR/pvc.yaml 2>&1
+  kubectl describe pvc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
+done
+
+# -- kamaji module
+
+if kubectl get deploy -n cozy-linstor linstor-controller >/dev/null 2>&1; then
+  echo "Collecting kamaji resources..."
+  DIR=$REPORT_DIR/kamaji
+  mkdir -p $DIR
+  kubectl logs -n cozy-kamaji deployment/kamaji > $DIR/kamaji-controller.log 2>&1
+  kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A > $DIR/kamajicontrolplanes.txt 2>&1
+  kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A -o yaml > $DIR/kamajicontrolplanes.yaml 2>&1
+  kubectl get tenantcontrolplanes.kamaji.clastix.io -A > $DIR/tenantcontrolplanes.txt 2>&1
+  kubectl get tenantcontrolplanes.kamaji.clastix.io -A -o yaml > $DIR/tenantcontrolplanes.yaml 2>&1
+fi
+
+# -- linstor module
+
+if kubectl get deploy -n cozy-linstor linstor-controller >/dev/null 2>&1; then
+  echo "Collecting linstor resources..."
+  DIR=$REPORT_DIR/linstor
+  mkdir -p $DIR
+  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color n l > $DIR/nodes.txt 2>&1
+  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color sp l > $DIR/storage-pools.txt 2>&1
+  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color r l > $DIR/resources.txt 2>&1
+fi
+
+# -- finalization
+
+echo "Creating archive..."
+tar -czf $REPORT_NAME.tgz -C $REPORT_PDIR .
+echo "Report created: $REPORT_NAME.tgz"
+
+echo "Cleaning up..."
+rm -rf $REPORT_PDIR
```
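Every module in this script follows the same shape: list the resources, filter the unhealthy rows with awk, then dump per-object YAML and `describe` output. A hypothetical extra module in that style, collecting recent cluster events, would be:

```sh
# Hypothetical additional module, not part of the committed script.
echo "Collecting events..."
mkdir -p $REPORT_DIR/kubernetes
kubectl get events -A --sort-by=.lastTimestamp > $REPORT_DIR/kubernetes/events.txt 2>&1
```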
hack/e2e-apps/clickhouse.bats | 41 (new file)

```diff
@@ -0,0 +1,41 @@
+#!/usr/bin/env bats
+
+@test "Create DB ClickHouse" {
+  name='test'
+  kubectl apply -f- <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: ClickHouse
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  size: 10Gi
+  logStorageSize: 2Gi
+  shards: 1
+  replicas: 2
+  storageClass: ""
+  logTTL: 15
+  users:
+    testuser:
+      password: xai7Wepo
+  backup:
+    enabled: false
+    s3Region: us-east-1
+    s3Bucket: s3.example.org/clickhouse-backups
+    schedule: "0 2 * * *"
+    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
+    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
+    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
+    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+  resources: {}
+  resourcesPreset: "nano"
+EOF
+  sleep 5
+  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
+  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
+  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
+  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
+  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
+  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
+}
```
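These bats tests repeat a `timeout N sh -ec "until ...; do sleep 10; done"` polling idiom. A small helper condensing it (hypothetical, not part of this changeset) could look like:

```sh
# Hypothetical helper for the recurring poll-until idiom in these tests.
wait_for() {  # usage: wait_for <seconds> '<shell command>'
  timeout "$1" sh -ec "until $2; do sleep 10; done"
}
# e.g.: wait_for 80 "kubectl -n tenant-test get pvc data-kafka-test-zookeeper-0"
```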
hack/e2e-apps/kafka.bats | 51 (new file)

```diff
@@ -0,0 +1,51 @@
+#!/usr/bin/env bats
+
+@test "Create Kafka" {
+  name='test'
+  kubectl apply -f- <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: Kafka
+metadata:
+  name: $name
+  namespace: tenant-test
+spec:
+  external: false
+  kafka:
+    size: 10Gi
+    replicas: 2
+    storageClass: ""
+    resources: {}
+    resourcesPreset: "nano"
+  zookeeper:
+    size: 5Gi
+    replicas: 2
+    storageClass: ""
+    resources:
+    resourcesPreset: "nano"
+  topics:
+    - name: testResults
+      partitions: 1
+      replicas: 2
+      config:
+        min.insync.replicas: 2
+    - name: testOrders
+      config:
+        cleanup.policy: compact
+        segment.ms: 3600000
+        max.compaction.lag.ms: 5400000
+        min.insync.replicas: 2
+      partitions: 1
+      replicas: 2
+EOF
+  sleep 5
+  kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
+  kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
+  timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
+  kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
+  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
+  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
+  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
+  kubectl -n tenant-test delete kafka.apps.cozystack.io $name
+  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
+  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
+}
```
hack/e2e-apps.bats → hack/e2e-apps/kubernetes.bats | 28 (executable file → normal file)

```diff
@@ -1,30 +1,7 @@
 #!/usr/bin/env bats
-# -----------------------------------------------------------------------------
-# Cozystack end‑to‑end provisioning test (Bats)
-# -----------------------------------------------------------------------------
-
-@test "Create tenant with isolated mode enabled" {
-  kubectl create -f - <<EOF
-apiVersion: apps.cozystack.io/v1alpha1
-kind: Tenant
-metadata:
-  name: test
-  namespace: tenant-root
-spec:
-  etcd: false
-  host: ""
-  ingress: false
-  isolated: true
-  monitoring: false
-  resourceQuotas: {}
-  seaweedfs: false
-EOF
-  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
-  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
-}
 
 @test "Create a tenant Kubernetes control plane" {
-  kubectl create -f - <<EOF
+  kubectl apply -f - <<EOF
 apiVersion: apps.cozystack.io/v1alpha1
 kind: Kubernetes
 metadata:
@@ -90,5 +67,6 @@ EOF
   kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
   kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
   kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
-  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=5m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
+  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
+  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
 }
```
46
hack/e2e-apps/mysql.bats
Normal file
@@ -0,0 +1,46 @@
#!/usr/bin/env bats

@test "Create DB MySQL" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  users:
    testuser:
      maxUserConnections: 1000
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
        - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
  kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}
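A hedged manual follow-up to this test: the service, user, and database names all come from the manifest above, so a throwaway client pod can confirm the credentials work (the `mysql:8` client image is an arbitrary choice, not something the chart prescribes):

```bash
# One-off client pod; image choice is an assumption, credentials are from the manifest.
kubectl -n tenant-test run mysql-client --rm -it --restart=Never --image=mysql:8 -- \
  mysql -h mysql-test -u testuser -pxai7Wepo testdb -e 'SELECT 1;'
```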
54
hack/e2e-apps/postgres.bats
Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env bats

@test "Create DB PostgreSQL" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  postgresql:
    parameters:
      max_connections: 100
  quorum:
    minSyncReplicas: 0
    maxSyncReplicas: 0
  users:
    testuser:
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
        - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  # for some reason it takes longer for the read-only endpoint to be ready
  #timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
  kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}
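The same kind of hedged spot check works here, pointed at the read-write service the test waits on (`postgres:16` is an arbitrary client image, not one the chart mandates):

```bash
# One-off psql client against the -rw service; image choice is an assumption.
kubectl -n tenant-test run psql-client --rm -it --restart=Never --image=postgres:16 \
  --env=PGPASSWORD=xai7Wepo -- psql -h postgres-test-rw -U testuser -d testdb -c 'SELECT 1;'
```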
26
hack/e2e-apps/redis.bats
Normal file
@@ -0,0 +1,26 @@
#!/usr/bin/env bats

@test "Create Redis" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 1Gi
  replicas: 2
  storageClass: ""
  authEnabled: true
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
  kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
  kubectl -n tenant-test delete redis.apps.cozystack.io $name
}
47
hack/e2e-apps/virtualmachine.bats
Normal file
@@ -0,0 +1,47 @@
#!/usr/bin/env bats

@test "Create a Virtual Machine" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
  - 22
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  systemDisk:
    image: ubuntu
    storage: 5Gi
    storageClass: replicated
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
    - name: test
      shell: /bin/bash
      sudo: ['ALL=(ALL) NOPASSWD: ALL']
      groups: sudo
      ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}
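Once the VMI reports an IP address, an interactive session is possible without exposing the VM externally; this sketch assumes KubeVirt's `virtctl` binary is installed and that its proxied SSH subcommand is available in the deployed version:

```bash
# Hedged example: SSH into the VMI through the Kubernetes API via virtctl.
# The user and key must match spec.sshKeys / cloudInit from the manifest above.
virtctl ssh -n tenant-test test@vmi/virtual-machine-test
```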
68
hack/e2e-apps/vminstance.bats
Normal file
@@ -0,0 +1,68 @@
#!/usr/bin/env bats

@test "Create a VM Disk" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMDisk
metadata:
  name: $name
  namespace: tenant-test
spec:
  source:
    http:
      url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
  optical: false
  storage: 5Gi
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
  diskName='test'
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
  - 22
  running: true
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  disks:
  - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
    - name: test
      shell: /bin/bash
      sudo: ['ALL=(ALL) NOPASSWD: ALL']
      groups: sudo
      ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}
260
hack/e2e-cluster.bats → hack/e2e-install-cozystack.bats
Executable file → Normal file
@@ -1,237 +1,10 @@
 #!/usr/bin/env bats
 # -----------------------------------------------------------------------------
 # Cozystack end‑to‑end provisioning test (Bats)
 # -----------------------------------------------------------------------------
 
-@test "Required installer assets exist" {
-  if [ ! -f _out/assets/cozystack-installer.yaml ]; then
-    echo "Missing: _out/assets/cozystack-installer.yaml" >&2
-    exit 1
-  fi
-
-  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
-    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
-    exit 1
-  fi
-}
-
-@test "IPv4 forwarding is enabled" {
-  if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
-    echo "IPv4 forwarding is disabled!" >&2
-    echo >&2
-    echo "Enable it with:" >&2
-    echo "  echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
-    exit 1
-  fi
-}
-
-@test "Clean previous VMs" {
-  kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
-  rm -rf srv1 srv2 srv3
-}
-
-@test "Prepare networking and masquerading" {
-  ip link del cozy-br0 2>/dev/null || true
-  ip link add cozy-br0 type bridge
-  ip link set cozy-br0 up
-  ip address add 192.168.123.1/24 dev cozy-br0
-
-  # Masquerading rule – idempotent (delete first, then add)
-  iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
-  iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
-}
-
-@test "Prepare cloud‑init drive for VMs" {
-  mkdir -p srv1 srv2 srv3
-
-  # Generate cloud‑init ISOs
-  for i in 1 2 3; do
-    echo "hostname: srv${i}" > "srv${i}/meta-data"
-
-    cat > "srv${i}/user-data" <<'EOF'
-#cloud-config
-EOF
-
-    cat > "srv${i}/network-config" <<EOF
-version: 2
-ethernets:
-  eth0:
-    dhcp4: false
-    addresses:
-      - "192.168.123.1${i}/26"
-    gateway4: "192.168.123.1"
-    nameservers:
-      search: [cluster.local]
-      addresses: [8.8.8.8]
-EOF
-
-    ( cd "srv${i}" && genisoimage \
-        -output seed.img \
-        -volid cidata -rational-rock -joliet \
-        user-data meta-data network-config )
-  done
-}
-
-@test "Use Talos NoCloud image from assets" {
-  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
-    echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
-    exit 1
-  fi
-
-  rm -f nocloud-amd64.raw
-  cp _out/assets/nocloud-amd64.raw.xz .
-  xz --decompress nocloud-amd64.raw.xz
-}
-
-@test "Prepare VM disks" {
-  for i in 1 2 3; do
-    cp nocloud-amd64.raw srv${i}/system.img
-    qemu-img resize srv${i}/system.img 50G
-    qemu-img create srv${i}/data.img 100G
-  done
-}
-
-@test "Create tap devices" {
-  for i in 1 2 3; do
-    ip link del cozy-srv${i} 2>/dev/null || true
-    ip tuntap add dev cozy-srv${i} mode tap
-    ip link set cozy-srv${i} up
-    ip link set cozy-srv${i} master cozy-br0
-  done
-}
-
-@test "Boot QEMU VMs" {
-  for i in 1 2 3; do
-    qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-      -device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
-      -netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
-      -drive file=srv${i}/system.img,if=virtio,format=raw \
-      -drive file=srv${i}/seed.img,if=virtio,format=raw \
-      -drive file=srv${i}/data.img,if=virtio,format=raw \
-      -display none -daemonize -pidfile srv${i}/qemu.pid
-  done
-
-  # Give qemu a few seconds to start up networking
-  sleep 5
-}
-
-@test "Wait until Talos API port 50000 is reachable on all machines" {
-  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
-}
-
-@test "Generate Talos cluster configuration" {
-  # Cluster‑wide patches
-  cat > patch.yaml <<'EOF'
-machine:
-  kubelet:
-    nodeIP:
-      validSubnets:
-        - 192.168.123.0/24
-    extraConfig:
-      maxPods: 512
-  kernel:
-    modules:
-      - name: openvswitch
-      - name: drbd
-        parameters:
-          - usermode_helper=disabled
-      - name: zfs
-      - name: spl
-  registries:
-    mirrors:
-      docker.io:
-        endpoints:
-          - https://mirror.gcr.io
-  files:
-    - content: |
-        [plugins]
-          [plugins."io.containerd.cri.v1.runtime"]
-            device_ownership_from_security_context = true
-      path: /etc/cri/conf.d/20-customization.part
-      op: create
-
-cluster:
-  apiServer:
-    extraArgs:
-      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
-      oidc-client-id: "kubernetes"
-      oidc-username-claim: "preferred_username"
-      oidc-groups-claim: "groups"
-  network:
-    cni:
-      name: none
-    dnsDomain: cozy.local
-    podSubnets:
-      - 10.244.0.0/16
-    serviceSubnets:
-      - 10.96.0.0/16
-EOF
-
-  # Control‑plane‑only patches
-  cat > patch-controlplane.yaml <<'EOF'
-machine:
-  nodeLabels:
-    node.kubernetes.io/exclude-from-external-load-balancers:
-      $patch: delete
-  network:
-    interfaces:
-      - interface: eth0
-        vip:
-          ip: 192.168.123.10
-cluster:
-  allowSchedulingOnControlPlanes: true
-  controllerManager:
-    extraArgs:
-      bind-address: 0.0.0.0
-  scheduler:
-    extraArgs:
-      bind-address: 0.0.0.0
-  apiServer:
-    certSANs:
-      - 127.0.0.1
-  proxy:
-    disabled: true
-  discovery:
-    enabled: false
-  etcd:
-    advertisedSubnets:
-      - 192.168.123.0/24
-EOF
-
-  # Generate secrets once
-  if [ ! -f secrets.yaml ]; then
-    talosctl gen secrets
-  fi
-
-  rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
-  talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
-    --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
-}
-
-@test "Apply Talos configuration to the node" {
-  # Apply the configuration to all three nodes
-  for node in 11 12 13; do
-    talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
-  done
-
-  # Wait for Talos services to come up again
-  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
-}
-
-@test "Bootstrap Talos cluster" {
-  # Bootstrap etcd on the first node
-  timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
-
-  # Wait until etcd is healthy
-  timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
-  timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
-
-  # Retrieve kubeconfig
-  rm -f kubeconfig
-  talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
-
-  # Wait until all three nodes register in Kubernetes
-  timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
-}
-
 @test "Install Cozystack" {
@@ -254,14 +27,14 @@ EOF
   kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
 
   # Wait until HelmReleases appear & reconcile them
-  timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
+  timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
   sleep 5
-  kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
+  kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
 
   # Fail the test if any HelmRelease is not Ready
   if kubectl get hr -A | grep -v " True " | grep -v NAME; then
     kubectl get hr -A
-    fail "Some HelmReleases failed to reconcile"
+    echo "Some HelmReleases failed to reconcile" >&2
   fi
 }
 
@@ -276,7 +49,11 @@ EOF
   kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
   timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
 
+  created_pools=$(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor sp l -s data --pastable | awk '$2 == "data" {printf " " $4} END{printf " "}')
   for node in srv1 srv2 srv3; do
+    case $created_pools in
+      *" $node "*) echo "Storage pool 'data' already exists on node $node"; continue;;
+    esac
     kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
   done
 
@@ -311,7 +88,7 @@ parameters:
   property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
   property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
   property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
-volumeBindingMode: WaitForFirstConsumer
+volumeBindingMode: Immediate
 allowVolumeExpansion: true
 EOF
 }
@@ -389,3 +166,24 @@ EOF
   timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
   kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
 }
+
+@test "Create tenant with isolated mode enabled" {
+  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
+  kubectl apply -f - <<EOF
+apiVersion: apps.cozystack.io/v1alpha1
+kind: Tenant
+metadata:
+  name: test
+  namespace: tenant-root
+spec:
+  etcd: false
+  host: ""
+  ingress: false
+  isolated: true
+  monitoring: false
+  resourceQuotas: {}
+  seaweedfs: false
+EOF
+  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
+  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
+}
248
hack/e2e-prepare-cluster.bats
Normal file
@@ -0,0 +1,248 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end‑to‑end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Required installer assets exist" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi
}

@test "IPv4 forwarding is enabled" {
  if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
    echo "IPv4 forwarding is disabled!" >&2
    echo >&2
    echo "Enable it with:" >&2
    echo "  echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
    exit 1
  fi
}

@test "Clean previous VMs" {
  kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
  rm -rf srv1 srv2 srv3
}

@test "Prepare networking and masquerading" {
  ip link del cozy-br0 2>/dev/null || true
  ip link add cozy-br0 type bridge
  ip link set cozy-br0 up
  ip address add 192.168.123.1/24 dev cozy-br0

  # Masquerading rule – idempotent (delete first, then add)
  iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
  iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}

@test "Prepare cloud‑init drive for VMs" {
  mkdir -p srv1 srv2 srv3

  # Generate cloud‑init ISOs
  for i in 1 2 3; do
    echo "hostname: srv${i}" > "srv${i}/meta-data"

    cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF

    cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1${i}/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOF

    ( cd "srv${i}" && genisoimage \
        -output seed.img \
        -volid cidata -rational-rock -joliet \
        user-data meta-data network-config )
  done
}

@test "Use Talos NoCloud image from assets" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
    exit 1
  fi

  rm -f nocloud-amd64.raw
  cp _out/assets/nocloud-amd64.raw.xz .
  xz --decompress nocloud-amd64.raw.xz
}

@test "Prepare VM disks" {
  for i in 1 2 3; do
    cp nocloud-amd64.raw srv${i}/system.img
    qemu-img resize srv${i}/system.img 50G
    qemu-img create srv${i}/data.img 100G
  done
}

@test "Create tap devices" {
  for i in 1 2 3; do
    ip link del cozy-srv${i} 2>/dev/null || true
    ip tuntap add dev cozy-srv${i} mode tap
    ip link set cozy-srv${i} up
    ip link set cozy-srv${i} master cozy-br0
  done
}

@test "Boot QEMU VMs" {
  for i in 1 2 3; do
    qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
      -device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
      -netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
      -drive file=srv${i}/system.img,if=virtio,format=raw \
      -drive file=srv${i}/seed.img,if=virtio,format=raw \
      -drive file=srv${i}/data.img,if=virtio,format=raw \
      -display none -daemonize -pidfile srv${i}/qemu.pid
  done

  # Give qemu a few seconds to start up networking
  sleep 5
}

@test "Wait until Talos API port 50000 is reachable on all machines" {
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Generate Talos cluster configuration" {
  # Cluster‑wide patches
  cat > patch.yaml <<'EOF'
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://dockerio.nexus.lllamnyp.su
      cr.fluentbit.io:
        endpoints:
          - https://fluentbit.nexus.lllamnyp.su
      docker-registry3.mariadb.com:
        endpoints:
          - https://mariadb.nexus.lllamnyp.su
      gcr.io:
        endpoints:
          - https://gcr.nexus.lllamnyp.su
      ghcr.io:
        endpoints:
          - https://ghcr.nexus.lllamnyp.su
      quay.io:
        endpoints:
          - https://quay.nexus.lllamnyp.su
      registry.k8s.io:
        endpoints:
          - https://k8s.nexus.lllamnyp.su
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOF

  # Control‑plane‑only patches
  cat > patch-controlplane.yaml <<'EOF'
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOF

  # Generate secrets once
  if [ ! -f secrets.yaml ]; then
    talosctl gen secrets
  fi

  rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
  talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
    --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}

@test "Apply Talos configuration to the node" {
  # Apply the configuration to all three nodes
  for node in 11 12 13; do
    talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
  done

  # Wait for Talos services to come up again
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Bootstrap Talos cluster" {
  # Bootstrap etcd on the first node
  timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

  # Wait until etcd is healthy
  timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
  timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'

  # Retrieve kubeconfig
  rm -f kubeconfig
  talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10

  # Wait until all three nodes register in Kubernetes
  timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}
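After the nodes register, a reasonable manual cross-check (not part of the suite) is Talos's built-in aggregate health probe, reusing the endpoint and node addresses from the tests above:

```bash
# Optional verification: checks etcd, kubelet and control-plane components in one call.
talosctl health \
  -e 192.168.123.10 \
  -n 192.168.123.11,192.168.123.12,192.168.123.13
```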
139
internal/controller/system_helm_reconciler.go
Normal file
@@ -0,0 +1,139 @@
package controller

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"time"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

type CozystackConfigReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

var configMapNames = []string{"cozystack", "cozystack-branding", "cozystack-scheduling"}

const configMapNamespace = "cozy-system"
const digestAnnotation = "cozystack.io/cozy-config-digest"
const forceReconcileKey = "reconcile.fluxcd.io/forceAt"
const requestedAt = "reconcile.fluxcd.io/requestedAt"

func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
	log := log.FromContext(ctx)

	digest, err := r.computeDigest(ctx)
	if err != nil {
		log.Error(err, "failed to compute config digest")
		return ctrl.Result{}, nil
	}

	var helmList helmv2.HelmReleaseList
	if err := r.List(ctx, &helmList); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to list HelmReleases: %w", err)
	}

	now := time.Now().Format(time.RFC3339Nano)
	updated := 0

	for _, hr := range helmList.Items {
		isSystemApp := hr.Labels["cozystack.io/system-app"] == "true"
		isTenantRoot := hr.Namespace == "tenant-root" && hr.Name == "tenant-root"
		if !isSystemApp && !isTenantRoot {
			continue
		}

		if hr.Annotations == nil {
			hr.Annotations = map[string]string{}
		}

		if hr.Annotations[digestAnnotation] == digest {
			continue
		}

		patch := client.MergeFrom(hr.DeepCopy())
		hr.Annotations[digestAnnotation] = digest
		hr.Annotations[forceReconcileKey] = now
		hr.Annotations[requestedAt] = now

		if err := r.Patch(ctx, &hr, patch); err != nil {
			log.Error(err, "failed to patch HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
			continue
		}
		updated++
		log.Info("patched HelmRelease with new config digest", "name", hr.Name, "namespace", hr.Namespace)
	}

	log.Info("finished reconciliation", "updatedHelmReleases", updated)
	return ctrl.Result{}, nil
}

func (r *CozystackConfigReconciler) computeDigest(ctx context.Context) (string, error) {
	hash := sha256.New()

	for _, name := range configMapNames {
		var cm corev1.ConfigMap
		err := r.Get(ctx, client.ObjectKey{Namespace: configMapNamespace, Name: name}, &cm)
		if err != nil {
			if kerrors.IsNotFound(err) {
				continue // ignore missing
			}
			return "", err
		}

		// Sort keys for consistent hashing
		var keys []string
		for k := range cm.Data {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		for _, k := range keys {
			v := cm.Data[k]
			fmt.Fprintf(hash, "%s:%s=%s\n", name, k, v)
		}
	}

	return hex.EncodeToString(hash.Sum(nil)), nil
}

func (r *CozystackConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		WithEventFilter(predicate.Funcs{
			UpdateFunc: func(e event.UpdateEvent) bool {
				cm, ok := e.ObjectNew.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
			CreateFunc: func(e event.CreateEvent) bool {
				cm, ok := e.Object.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
			DeleteFunc: func(e event.DeleteEvent) bool {
				cm, ok := e.Object.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
		}).
		For(&corev1.ConfigMap{}).
		Complete(r)
}

func contains(slice []string, val string) bool {
	for _, s := range slice {
		if s == val {
			return true
		}
	}
	return false
}
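The controller's effect is easy to observe from the CLI. A rough manual experiment, assuming it is deployed with the usual `cozy-system` layout (the `e2e-touch` key is purely illustrative): change a data key in a watched ConfigMap, then read the digest annotation back from the system HelmReleases.

```bash
# Touch a watched ConfigMap (the data key name is illustrative)...
kubectl -n cozy-system patch configmap cozystack --type=merge -p '{"data":{"e2e-touch":"1"}}'
# ...then confirm system HelmReleases carry the new digest and a forced reconcile.
kubectl get hr -A -l cozystack.io/system-app=true \
  -o custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name,DIGEST:.metadata.annotations.cozystack\.io/cozy-config-digest'
```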
@@ -248,15 +248,24 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      fmt.Sprintf("pod-%s", pod.Name),
 			Namespace: pod.Namespace,
+			Labels:    map[string]string{},
 		},
 	}
 
+	metaLabels := r.getWorkloadMetadata(&pod)
 	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
 		// Update owner references with the new monitor
 		updateOwnerReferences(workload.GetObjectMeta(), monitor)
 
 		// Copy labels from the Pod if needed
-		workload.Labels = pod.Labels
+		for k, v := range pod.Labels {
+			workload.Labels[k] = v
+		}
+
+		// Add workload meta to labels
+		for k, v := range metaLabels {
+			workload.Labels[k] = v
+		}
 
 		// Fill Workload status fields:
 		workload.Status.Kind = monitor.Spec.Kind
@@ -433,3 +442,12 @@ func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.
 		return requests
 	}
 }
+
+func (r *WorkloadMonitorReconciler) getWorkloadMetadata(obj client.Object) map[string]string {
+	labels := make(map[string]string)
+	annotations := obj.GetAnnotations()
+	if instanceType, ok := annotations["kubevirt.io/cluster-instancetype-name"]; ok {
+		labels["workloads.cozystack.io/kubevirt-vmi-instance-type"] = instanceType
+	}
+	return labels
+}
@@ -16,10 +16,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
+version: 0.2.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.1.0"
+appVersion: "0.2.0"
1
packages/apps/bucket/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -18,3 +18,14 @@ rules:
   resourceNames:
   - {{ .Release.Name }}-ui
   verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io
@@ -16,15 +16,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.0
+version: 0.11.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
 appVersion: "24.9.2"
-
-dependencies:
-- name: cozy-lib
-  version: 0.1.0
-  repository: "http://cozystack.cozy-system.svc/repos/library"
@@ -1,10 +1,11 @@
-CLICKHOUSE_BACKUP_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
+CLICKHOUSE_BACKUP_TAG = $(shell awk '$$0 ~ /^version:/ {print $$2}' Chart.yaml)
 
 include ../../../scripts/common-envs.mk
 include ../../../scripts/package.mk
 
 generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md
+	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
 
 image:
 	docker buildx build images/clickhouse-backup \
@@ -1,32 +1,36 @@
-# Managed Clickhouse Service
+# Managed ClickHouse Service
 
-### How to restore backup:
+ClickHouse is an open source high-performance and column-oriented SQL database management system (DBMS).
+It is used for online analytical processing (OLAP).
 
-find snapshot:
-```
-restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
-```
+### How to restore backup from S3
 
-restore:
-```
-restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
-```
+1. Find the snapshot:
 
-more details:
-- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
+   ```bash
+   restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
+   ```
+
+2. Restore it:
+
+   ```bash
+   restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
+   ```
+
+For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1).
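One practical note for running these commands by hand: restic reads the S3 credentials and repository password from the environment, so a complete invocation looks roughly like this (the values below are the chart's placeholder defaults, not real secrets):

```bash
# Environment variables restic expects for an S3 repository.
export AWS_ACCESS_KEY_ID=oobaiRus9pah8PhohL1ThaeTa4UVa7gu
export AWS_SECRET_ACCESS_KEY=ju3eum4dekeich9ahM1te8waeGai0oog
export RESTIC_PASSWORD=ChaXoveekoh6eigh4siesheeda2quai0
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
```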
 
 ## Parameters
 
 ### Common parameters
 
-| Name             | Description                         | Value  |
-| ---------------- | ----------------------------------- | ------ |
-| `size`           | Persistent Volume size              | `10Gi` |
-| `logStorageSize` | Persistent Volume for logs size     | `2Gi`  |
-| `shards`         | Number of Clickhouse replicas       | `1`    |
-| `replicas`       | Number of Clickhouse shards         | `2`    |
-| `storageClass`   | StorageClass used to store the data | `""`   |
-| `logTTL`         | for query_log and query_thread_log  | `15`   |
+| Name             | Description                                               | Value  |
+| ---------------- | --------------------------------------------------------- | ------ |
+| `size`           | Size of Persistent Volume for data                        | `10Gi` |
+| `logStorageSize` | Size of Persistent Volume for logs                        | `2Gi`  |
+| `shards`         | Number of Clickhouse shards                               | `1`    |
+| `replicas`       | Number of Clickhouse replicas                             | `2`    |
+| `storageClass`   | StorageClass used to store the data                       | `""`   |
+| `logTTL`         | TTL (expiration time) for query_log and query_thread_log  | `15`   |
 
 ### Configuration parameters
 
@@ -36,15 +40,41 @@ more details:
 
 ### Backup parameters
 
-| Name | Description | Value |
-| ------------------------ | ----------- | ----- |
-| `backup.enabled` | Enable pereiodic backups | `false` |
-| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
-| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
-| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
-| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
-| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
-| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
-| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
-| `resources` | Resources | `{}` |
-| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
+| Name | Description | Value |
+| ------------------------ | ----------- | ----- |
+| `backup.enabled` | Enable periodic backups | `false` |
+| `backup.s3Region` | AWS S3 region where backups are stored | `us-east-1` |
+| `backup.s3Bucket` | S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
+| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
+| `backup.cleanupStrategy` | Retention strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
+| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
+| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
+| `backup.resticPassword` | Password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
+| `resources` | Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
+
+## Parameter examples and reference
+
+### resources and resourcesPreset
+
+`resources` sets explicit CPU and memory configurations for each replica.
+When left empty, the preset defined in `resourcesPreset` is applied.
+
+```yaml
+resources:
+  cpu: 4000m
+  memory: 4Gi
+```
+
+`resourcesPreset` sets named CPU and memory configurations for each replica.
+This setting is ignored if the corresponding `resources` value is set.
+
+| Preset name | CPU    | memory  |
+|-------------|--------|---------|
+| `nano`      | `250m` | `128Mi` |
+| `micro`     | `500m` | `256Mi` |
+| `small`     | `1`    | `512Mi` |
+| `medium`    | `1`    | `1Gi`   |
+| `large`     | `3`    | `2Gi`   |
+| `xlarge`    | `4`    | `4Gi`   |
+| `2xlarge`   | `8`    | `8Gi`   |
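To apply the table above to a running instance, switching presets is a one-line merge patch against the app resource; the `clickhouses.apps.cozystack.io` plural, the namespace, and the release name here are assumptions patterned on the other Cozystack apps:

```bash
# Hedged example: move an instance to the `medium` preset.
kubectl -n tenant-test patch clickhouses.apps.cozystack.io clickhouse-example \
  --type=merge -p '{"spec":{"resourcesPreset":"medium"}}'
```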
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/clickhouse-backup:0.9.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.11.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
@@ -1,3 +1,5 @@
+{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
+{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
 {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
 {{- $passwords := dict }}
 {{- $users := .Values.users }}
@@ -32,7 +34,7 @@ kind: "ClickHouseInstallation"
 metadata:
   name: "{{ .Release.Name }}"
 spec:
-  namespaceDomainPattern: "%s.svc.cozy.local"
+  namespaceDomainPattern: "%s.svc.{{ $clusterDomain }}"
   defaults:
     templates:
       dataVolumeClaimTemplate: data-volume-template
@@ -92,6 +94,9 @@ spec:
   templates:
     volumeClaimTemplates:
       - name: data-volume-template
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
         spec:
           accessModes:
             - ReadWriteOnce
@@ -99,6 +104,9 @@ spec:
             requests:
               storage: {{ .Values.size }}
       - name: log-volume-template
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
         spec:
           accessModes:
             - ReadWriteOnce
@@ -107,6 +115,9 @@ spec:
               storage: {{ .Values.logStorageSize }}
     podTemplates:
       - name: clickhouse-per-host
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
         spec:
           affinity:
             podAntiAffinity:
@@ -121,11 +132,7 @@ spec:
           containers:
             - name: clickhouse
               image: clickhouse/clickhouse-server:24.9.2.42
-              {{- if .Values.resources }}
-              resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 16 }}
-              {{- else if ne .Values.resourcesPreset "none" }}
-              resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 16 }}
-              {{- end }}
+              resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 16 }}
               volumeMounts:
                 - name: data-volume-template
                   mountPath: /var/lib/clickhouse
@@ -133,6 +140,9 @@ spec:
                   mountPath: /var/log/clickhouse-server
     serviceTemplates:
       - name: svc-template
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
         generateName: chendpoint-{chi}
         spec:
           ports:
@@ -24,3 +24,14 @@ rules:
   resourceNames:
   - {{ .Release.Name }}
   verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io
@@ -9,5 +9,5 @@ spec:
   kind: clickhouse
   type: clickhouse
   selector:
-    clickhouse.altinity.com/chi: {{ $.Release.Name }}
+    app.kubernetes.io/instance: {{ $.Release.Name }}
   version: {{ $.Chart.Version }}
@@ -4,22 +4,22 @@
     "properties": {
         "size": {
             "type": "string",
-            "description": "Persistent Volume size",
+            "description": "Size of Persistent Volume for data",
            "default": "10Gi"
         },
         "logStorageSize": {
             "type": "string",
-            "description": "Persistent Volume for logs size",
+            "description": "Size of Persistent Volume for logs",
             "default": "2Gi"
         },
         "shards": {
             "type": "number",
-            "description": "Number of Clickhouse replicas",
+            "description": "Number of Clickhouse shards",
             "default": 1
         },
         "replicas": {
             "type": "number",
-            "description": "Number of Clickhouse shards",
+            "description": "Number of Clickhouse replicas",
             "default": 2
         },
         "storageClass": {
@@ -29,7 +29,7 @@
         },
         "logTTL": {
             "type": "number",
-            "description": "for query_log and query_thread_log",
+            "description": "TTL (expiration time) for query_log and query_thread_log",
             "default": 15
         },
         "backup": {
@@ -37,17 +37,17 @@
             "properties": {
                 "enabled": {
                     "type": "boolean",
-                    "description": "Enable pereiodic backups",
+                    "description": "Enable periodic backups",
                     "default": false
                 },
                 "s3Region": {
                     "type": "string",
-                    "description": "The AWS S3 region where backups are stored",
+                    "description": "AWS S3 region where backups are stored",
                     "default": "us-east-1"
                 },
                 "s3Bucket": {
                     "type": "string",
-                    "description": "The S3 bucket used for storing backups",
+                    "description": "S3 bucket used for storing backups",
                     "default": "s3.example.org/clickhouse-backups"
                 },
                 "schedule": {
@@ -57,35 +57,45 @@
                 },
                 "cleanupStrategy": {
                     "type": "string",
-                    "description": "The strategy for cleaning up old backups",
+                    "description": "Retention strategy for cleaning up old backups",
                     "default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
                 },
                 "s3AccessKey": {
                     "type": "string",
-                    "description": "The access key for S3, used for authentication",
+                    "description": "Access key for S3, used for authentication",
                     "default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
                 },
                 "s3SecretKey": {
                     "type": "string",
-                    "description": "The secret key for S3, used for authentication",
+                    "description": "Secret key for S3, used for authentication",
                     "default": "ju3eum4dekeich9ahM1te8waeGai0oog"
                 },
                 "resticPassword": {
                     "type": "string",
-                    "description": "The password for Restic backup encryption",
+                    "description": "Password for Restic backup encryption",
                     "default": "ChaXoveekoh6eigh4siesheeda2quai0"
                 }
             }
         },
         "resources": {
             "type": "object",
-            "description": "Resources",
+            "description": "Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.",
             "default": {}
         },
         "resourcesPreset": {
             "type": "string",
-            "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
-            "default": "nano"
+            "description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
+            "default": "small",
+            "enum": [
+                "none",
+                "nano",
+                "micro",
+                "small",
+                "medium",
+                "large",
+                "xlarge",
+                "2xlarge"
+            ]
         }
     }
 }
@@ -1,11 +1,11 @@
 ## @section Common parameters
 
-## @param size Persistent Volume size
-## @param logStorageSize Persistent Volume for logs size
-## @param shards Number of Clickhouse replicas
-## @param replicas Number of Clickhouse shards
+## @param size Size of Persistent Volume for data
+## @param logStorageSize Size of Persistent Volume for logs
+## @param shards Number of Clickhouse shards
+## @param replicas Number of Clickhouse replicas
 ## @param storageClass StorageClass used to store the data
-## @param logTTL for query_log and query_thread_log
+## @param logTTL TTL (expiration time) for query_log and query_thread_log
 ##
 size: 10Gi
 logStorageSize: 2Gi
@@ -29,14 +29,14 @@ users: {}
 
 ## @section Backup parameters
 
-## @param backup.enabled Enable pereiodic backups
-## @param backup.s3Region The AWS S3 region where backups are stored
-## @param backup.s3Bucket The S3 bucket used for storing backups
+## @param backup.enabled Enable periodic backups
+## @param backup.s3Region AWS S3 region where backups are stored
+## @param backup.s3Bucket S3 bucket used for storing backups
 ## @param backup.schedule Cron schedule for automated backups
-## @param backup.cleanupStrategy The strategy for cleaning up old backups
-## @param backup.s3AccessKey The access key for S3, used for authentication
-## @param backup.s3SecretKey The secret key for S3, used for authentication
-## @param backup.resticPassword The password for Restic backup encryption
+## @param backup.cleanupStrategy Retention strategy for cleaning up old backups
+## @param backup.s3AccessKey Access key for S3, used for authentication
+## @param backup.s3SecretKey Secret key for S3, used for authentication
+## @param backup.resticPassword Password for Restic backup encryption
 backup:
   enabled: false
   s3Region: us-east-1
@@ -47,15 +47,11 @@ backup:
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
 
-## @param resources Resources
+## @param resources Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.
 resources: {}
 # resources:
-#   limits:
-#     cpu: 4000m
-#     memory: 4Gi
-#   requests:
-#     cpu: 100m
-#     memory: 512Mi
+#   cpu: 4000m
+#   memory: 4Gi
 
-## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-resourcesPreset: "nano"
+## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
+resourcesPreset: "small"
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.8.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -2,3 +2,4 @@ include ../../../scripts/package.mk
 
 generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md
+	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
@@ -1,17 +1,21 @@
# Managed FerretDB Service

FerretDB is an open source MongoDB alternative.
It translates MongoDB wire protocol queries to SQL and can be used as a direct replacement for MongoDB 5.0+.
Internally, FerretDB service is backed by Postgres.

## Parameters

### Common parameters

| Name | Description | Value |
| ---- | ----------- | ----- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `replicas` | Number of Postgres replicas | `2` |
| `storageClass` | StorageClass used to store the data | `""` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed. | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances). | `0` |
| Name | Description | Value |
| ---- | ----------- | ----- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `replicas` | Number of replicas | `2` |
| `storageClass` | StorageClass used to store the data | `""` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas) | `0` |

### Configuration parameters

@@ -21,17 +25,43 @@

### Backup parameters

| Name | Description | Value |
| ---- | ----------- | ----- |
| `backup.enabled` | Enable pereiodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ---- | ----------- | ----- |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |


## Parameter examples and reference

### resources and resourcesPreset

`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.

```yaml
resources:
  cpu: 4000m
  memory: 4Gi
```

`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.

| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

packages/apps/ferretdb/charts/cozy-lib (symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.12.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.14.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

@@ -24,3 +24,14 @@ rules:
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io

@@ -2,6 +2,8 @@ apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}

@@ -12,6 +12,7 @@ spec:
metadata:
labels:
app: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
containers:
- name: ferretdb

@@ -18,11 +18,7 @@ spec:
{{- end }}
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 4 }}
{{- else if ne .Values.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}
monitoring:
enablePodMonitor: true

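The charts in this release all swap the inline `if/else` shown above for a single shared `cozy-lib.resources.defaultingSanitize` include. The library itself is outside this diff, so the following is only a hedged sketch of the defaulting half of that behavior (the sanitizing half is not reproduced); the `example.*` name and the inlined preset map, taken from the README table, are assumptions rather than the actual cozy-lib code:

```yaml
{{/*
Hypothetical helper "example.resources.defaulting": falls back to a named
preset when no explicit resources are given. Not the real cozy-lib code.
*/}}
{{- define "example.resources.defaulting" -}}
{{- $preset := index . 0 -}}
{{- $resources := index . 1 -}}
{{- $presets := dict "nano" (dict "cpu" "250m" "memory" "128Mi") "micro" (dict "cpu" "500m" "memory" "256Mi") "small" (dict "cpu" "1" "memory" "512Mi") -}}
{{- if $resources -}}
{{- toYaml $resources -}}
{{- else if ne $preset "none" -}}
{{- toYaml (get $presets $preset) -}}
{{- end -}}
{{- end -}}
```

It would be invoked the same way as the real helper, e.g. `resources: {{- include "example.resources.defaulting" (list .Values.resourcesPreset .Values.resources) | nindent 4 }}`; the real call also passes `$`, giving the library access to the release context.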
@@ -35,6 +31,7 @@ spec:
inheritedMetadata:
labels:
policy.cozystack.io/allow-to-apiserver: "true"
app.kubernetes.io/instance: {{ .Release.Name }}

{{- if .Values.users }}
managed:

@@ -9,5 +9,5 @@ spec:
kind: ferretdb
type: ferretdb
selector:
app: {{ $.Release.Name }}
app.kubernetes.io/instance: {{ $.Release.Name }}
version: {{ $.Chart.Version }}

@@ -14,7 +14,7 @@
},
"replicas": {
"type": "number",
"description": "Number of Postgres replicas",
"description": "Number of replicas",
"default": 2
},
"storageClass": {
@@ -27,12 +27,12 @@
"properties": {
"minSyncReplicas": {
"type": "number",
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.",
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed",
"default": 0
},
"maxSyncReplicas": {
"type": "number",
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).",
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)",
"default": 0
}
}
@@ -42,7 +42,7 @@
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable pereiodic backups",
"description": "Enable periodic backups",
"default": false
},
"s3Region": {
@@ -84,13 +84,23 @@
},
"resources": {
"type": "object",
"description": "Resources",
"description": "Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "nano",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
}
}

@@ -2,7 +2,7 @@

## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param replicas Number of Postgres replicas
## @param replicas Number of replicas
## @param storageClass StorageClass used to store the data
##
external: false
@@ -11,8 +11,8 @@ replicas: 2
storageClass: ""

## Configuration for the quorum-based synchronous replication
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)
quorum:
minSyncReplicas: 0
maxSyncReplicas: 0
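To make the reworded quorum constraint concrete, here is a sketch of a three-replica setup (hypothetical values; the chart defaults both knobs to `0`) — a commit waits for at least `minSyncReplicas` synchronous standbys, and `maxSyncReplicas` must stay below the replica count:

```yaml
replicas: 3
quorum:
  minSyncReplicas: 1   # commit is acknowledged by at least one synchronous standby
  maxSyncReplicas: 2   # must be lower than the total number of replicas (3)
```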
@@ -31,7 +31,7 @@ users: {}

## @section Backup parameters

## @param backup.enabled Enable pereiodic backups
## @param backup.enabled Enable periodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
@@ -49,15 +49,11 @@ backup:
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

## @param resources Resources
## @param resources Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi

## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
# cpu: 4000m
# memory: 4Gi

## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.6.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -23,6 +23,8 @@ image-nginx:

generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.haproxy.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
yq -i -o json --indent 4 '.properties.nginx.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/chrislim2888/IP2Location-C-Library | awk -F'[/^]' 'END{print $$3}') && \

@@ -1,8 +1,9 @@
# Managed Nginx Caching Service
# Managed Nginx-based HTTP Cache Service

The Nginx Caching Service is designed to optimize web traffic and enhance web application performance. This service combines custom-built Nginx instances with HAproxy for efficient caching and load balancing.
The Nginx-based HTTP caching service is designed to optimize web traffic and enhance web application performance.
This service combines custom-built Nginx instances with HAProxy for efficient caching and load balancing.

## Deployment infromation
## Deployment information

The Nginx instances include the following modules and features:

@@ -53,27 +54,67 @@ The deployment architecture is illustrated in the diagram below:

## Known issues

VTS module shows wrong upstream resonse time
- https://github.com/vozlt/nginx-module-vts/issues/198
- VTS module shows wrong upstream response time, [github.com/vozlt/nginx-module-vts#198](https://github.com/vozlt/nginx-module-vts/issues/198)

## Parameters

### Common parameters

| Name | Description | Value |
| ---- | ----------- | ----- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
| `haproxy.resources` | Resources | `{}` |
| `haproxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `nginx.resources` | Resources | `{}` |
| `nginx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ---- | ----------- | ----- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
| `haproxy.resources` | Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `haproxy.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
| `nginx.resources` | Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `nginx.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |

### Configuration parameters

| Name | Description | Value |
| ----------- | ----------------------- | ----- |
| `endpoints` | Endpoints configuration | `[]` |

## Parameter examples and reference

### resources and resourcesPreset

`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.

```yaml
resources:
  cpu: 4000m
  memory: 4Gi
```

`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.

| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

### endpoints

`endpoints` is a flat list of IP addresses:

```yaml
endpoints:
  - 10.100.3.1:80
  - 10.100.3.11:80
  - 10.100.3.2:80
  - 10.100.3.12:80
  - 10.100.3.3:80
  - 10.100.3.13:80
```

packages/apps/http-cache/charts/cozy-lib (symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.5.0@sha256:99cd04f09f80eb0c60cc0b2f6bc8180ada7ada00cb594606447674953dfa1b67
ghcr.io/cozystack/cozystack/nginx-cache:0.6.0@sha256:b7633717cd7449c0042ae92d8ca9b36e4d69566561f5c7d44e21058e7d05c6d5

@@ -33,11 +33,7 @@ spec:
containers:
- image: haproxy:latest
name: haproxy
{{- if .Values.haproxy.resources }}
resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
{{- else if ne .Values.haproxy.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.haproxy.resourcesPreset .Values.haproxy.resources $) | nindent 10 }}
ports:
- containerPort: 8080
name: http

@@ -52,11 +52,7 @@ spec:
shareProcessNamespace: true
containers:
- name: nginx
{{- if $.Values.nginx.resources }}
resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
{{- else if ne $.Values.nginx.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list $.Values.nginx.resourcesPreset $.Values.nginx.resources $) | nindent 10 }}
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
readinessProbe:
httpGet:

packages/apps/http-cache/templates/workloadmonitor.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}-haproxy
spec:
replicas: {{ .Values.haproxy.replicas }}
minReplicas: 1
kind: http-cache
type: http-cache
selector:
app: {{ $.Release.Name }}-haproxy
version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}-nginx
spec:
replicas: {{ .Values.nginx.replicas }}
minReplicas: 1
kind: http-cache
type: http-cache
selector:
app: {{ $.Release.Name }}-nginx-cache
version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}
spec:
replicas: {{ .Values.replicas }}
minReplicas: 1
kind: http-cache
type: http-cache
selector:
app.kubernetes.io/instance: {{ $.Release.Name }}
version: {{ $.Chart.Version }}
@@ -27,13 +27,23 @@
},
"resources": {
"type": "object",
"description": "Resources",
"description": "Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "nano",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
@@ -47,13 +57,23 @@
},
"resources": {
"type": "object",
"description": "Resources",
"description": "Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "nano",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
@@ -64,4 +84,4 @@
"items": {}
}
}
}
}

@@ -12,31 +12,23 @@ size: 10Gi
storageClass: ""
haproxy:
replicas: 2
## @param haproxy.resources Resources
## @param haproxy.resources Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi

## @param haproxy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
# cpu: 4000m
# memory: 4Gi

## @param haproxy.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"
nginx:
replicas: 2
## @param nginx.resources Resources
## @param nginx.resources Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi

## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
# cpu: 4000m
# memory: 4Gi

## @param nginx.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"

## @section Configuration parameters

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.0
version: 0.8.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -2,3 +2,5 @@ include ../../../scripts/package.mk

generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.kafka.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
yq -i -o json --indent 4 '.properties.zookeeper.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

@@ -4,22 +4,67 @@

### Common parameters

| Name | Description | Value |
| ---- | ----------- | ----- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
| `kafka.resources` | Resources | `{}` |
| `kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `zookeeper.resources` | Resources | `{}` |
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ---- | ----------- | ----- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
| `kafka.resources` | Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `kafka.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
| `zookeeper.resources` | Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `zookeeper.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |

### Configuration parameters

| Name | Description | Value |
| -------- | -------------------- | ----- |
| `topics` | Topics configuration | `[]` |

## Parameter examples and reference

### resources and resourcesPreset

`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.

```yaml
resources:
  cpu: 4000m
  memory: 4Gi
```

`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.

| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

### topics

```yaml
topics:
  - name: Results
    partitions: 1
    replicas: 3
    config:
      min.insync.replicas: 2
  - name: Orders
    config:
      cleanup.policy: compact
      segment.ms: 3600000
      max.compaction.lag.ms: 5400000
      min.insync.replicas: 2
    partitions: 1
    replicas: 3
```

packages/apps/kafka/charts/cozy-lib (symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -25,3 +25,14 @@ rules:
- {{ .Release.Name }}
- {{ $.Release.Name }}-zookeeper
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io

@@ -8,11 +8,7 @@ metadata:
spec:
kafka:
replicas: {{ .Values.kafka.replicas }}
{{- if .Values.kafka.resources }}
resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
{{- else if ne .Values.kafka.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.kafka.resourcesPreset .Values.kafka.resources $) | nindent 6 }}
listeners:
- name: plain
port: 9092
@@ -70,11 +66,7 @@ spec:
key: kafka-metrics-config.yml
zookeeper:
replicas: {{ .Values.zookeeper.replicas }}
{{- if .Values.zookeeper.resources }}
resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
{{- else if ne .Values.zookeeper.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.zookeeper.resourcesPreset .Values.zookeeper.resources $) | nindent 6 }}
storage:
type: persistent-claim
{{- with .Values.zookeeper.size }}

@@ -27,13 +27,23 @@
},
"resources": {
"type": "object",
"description": "Resources",
"description": "Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "small",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
@@ -57,13 +67,23 @@
},
"resources": {
"type": "object",
"description": "Resources",
"description": "Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "small",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
@@ -74,4 +94,4 @@
"items": {}
}
}
}
}

@@ -14,35 +14,25 @@ kafka:
size: 10Gi
replicas: 3
storageClass: ""
## @param kafka.resources Resources
## @param kafka.resources Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi

## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
# cpu: 4000m
# memory: 4Gi
## @param kafka.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "small"

zookeeper:
size: 5Gi
replicas: 3
storageClass: ""
## @param zookeeper.resources Resources
## @param zookeeper.resources Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi

## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
# cpu: 4000m
# memory: 4Gi
## @param zookeeper.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "small"

## @section Configuration parameters


@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.21.0
version: 0.25.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -6,6 +6,7 @@ include ../../../scripts/package.mk

generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -o json -i '.properties.addons.properties.ingressNginx.properties.exposeMethod.enum = ["Proxied","LoadBalancer"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json

@@ -90,53 +90,65 @@ See the reference for components utilized in this service:

### Cluster Addons

| Name | Description | Value |
| ---- | ----------- | ----- |
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` |
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. | `[]` |
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enable FluxCD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
| Name | Description | Value |
| ---- | ----------- | ----- |
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` |
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.exposeMethod` | Method to expose the Ingress-NGINX controller. (allowed values: Proxied, LoadBalancer) | `Proxied` |
| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. Taken into account only when `exposeMethod` is set to `Proxied`. | `[]` |
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enable FluxCD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
| `addons.velero.enabled` | Enable velero for backup and restore k8s cluster. | `false` |
| `addons.velero.valuesOverride` | Custom values to override | `{}` |

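A sketch of how the new `exposeMethod` knob combines with `hosts` in a tenant-cluster values override; the key names come from the table above, while the domain is a placeholder:

```yaml
addons:
  ingressNginx:
    enabled: true
    exposeMethod: Proxied   # or LoadBalancer; hosts are only honored with Proxied
    hosts:
      - example.org         # hypothetical domain routed by the parent cluster
```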
### Kubernetes Control Plane Configuration

| Name | Description | Value |
| ---- | ----------- | ----- |
| `controlPlane.apiServer.resources` | Explicit CPU/memory resource requests and limits for the API server. | `{}` |
| `controlPlane.apiServer.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `small` |
| `controlPlane.controllerManager.resources` | Explicit CPU/memory resource requests and limits for the controller manager. | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
| `controlPlane.scheduler.resources` | Explicit CPU/memory resource requests and limits for the scheduler. | `{}` |
| `controlPlane.scheduler.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
| `controlPlane.konnectivity.server.resources` | Explicit CPU/memory resource requests and limits for the Konnectivity. | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
| Name | Description | Value |
| ---- | ----------- | ----- |
| `controlPlane.apiServer.resources` | Explicit CPU and memory configuration for the API Server. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.apiServer.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `medium` |
| `controlPlane.controllerManager.resources` | Explicit CPU and memory configuration for the Controller Manager. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
| `controlPlane.scheduler.resources` | Explicit CPU and memory configuration for the Scheduler. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.scheduler.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
| `controlPlane.konnectivity.server.resources` | Explicit CPU and memory configuration for Konnectivity. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |

In production environments, it's recommended to set `resources` explicitly.
Example of `controlPlane.*.resources`:

## Parameter examples and reference

### resources and resourcesPreset

`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.

```yaml
resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi
resources:
  cpu: 4000m
  memory: 4Gi
```

Allowed values for `controlPlane.*.resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
This value is ignored if the corresponding `resources` value is set.
`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.

## Resources Reference
| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

### instanceType Resources

@@ -300,4 +312,3 @@ Specific characteristics of this series are:
workload.
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from
the medium size.

packages/apps/kubernetes/charts/cozy-lib (symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.21.0@sha256:7315850634728a5864a3de3150c12f0e1454f3f1ce33cdf21a278f57611dd5e9
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.0@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.21.0@sha256:6962bdf51ab2ff40b420b9cff7c850aeea02187da2a65a67f10e0471744649d7
|
||||
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.0@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1
|
||||
|
||||
@@ -8,7 +8,7 @@ ENV GOARCH=$TARGETARCH
|
||||
|
||||
RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
|
||||
&& cd /go/src/kubevirt.io/cloud-provider-kubevirt \
|
||||
&& git checkout 443a1fe
|
||||
&& git checkout a0acf33
|
||||
|
||||
WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ index 74166b5d9..4e744f8de 100644
|
||||
klog.Infof("Initializing kubevirtEPSController")
|
||||
|
||||
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
|
||||
index 6f6e3d322..b56882c12 100644
|
||||
index 53388eb8e..b56882c12 100644
|
||||
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
|
||||
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
|
||||
@@ -54,10 +54,10 @@ type Controller struct {
|
||||
@@ -286,22 +286,6 @@ index 6f6e3d322..b56882c12 100644
|
||||
for _, eps := range slicesToDelete {
|
||||
err := c.infraClient.DiscoveryV1().EndpointSlices(eps.Namespace).Delete(context.TODO(), eps.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
@@ -474,11 +538,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
|
||||
// Create the desired port configuration
|
||||
var desiredPorts []discovery.EndpointPort
|
||||
|
||||
- for _, port := range service.Spec.Ports {
|
||||
+ for i := range service.Spec.Ports {
|
||||
desiredPorts = append(desiredPorts, discovery.EndpointPort{
|
||||
- Port: &port.TargetPort.IntVal,
|
||||
- Protocol: &port.Protocol,
|
||||
- Name: &port.Name,
|
||||
+ Port: &service.Spec.Ports[i].TargetPort.IntVal,
|
||||
+ Protocol: &service.Spec.Ports[i].Protocol,
|
||||
+ Name: &service.Spec.Ports[i].Name,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -588,55 +652,114 @@ func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
|
||||
return false
|
||||
}
|
||||
@@ -437,18 +421,10 @@ index 6f6e3d322..b56882c12 100644
|
||||
|
||||
return nil
|
||||
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
|
||||
index 1fb86e25f..14d92d340 100644
|
||||
index 1c97035b4..14d92d340 100644
|
||||
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
|
||||
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
+ "k8s.io/apimachinery/pkg/util/sets"
|
||||
dfake "k8s.io/client-go/dynamic/fake"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/testing"
|
||||
@@ -189,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
|
||||
@@ -190,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
|
||||
}: "VirtualMachineInstanceList",
|
||||
})
|
||||
|
||||
@@ -457,83 +433,87 @@ index 1fb86e25f..14d92d340 100644
|
||||
|
||||
err := controller.Init()
|
||||
if err != nil {
|
||||
@@ -686,5 +687,229 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
|
||||
return false, err
|
||||
}).Should(BeTrue(), "EndpointSlice in infra cluster should be recreated by the controller after deletion")
|
||||
})
|
||||
+
|
||||
+ g.It("Should correctly handle multiple unique ports in EndpointSlice", func() {
|
||||
+ // Create a VMI in the infra cluster
|
||||
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
|
||||
+
|
||||
+ // Create an EndpointSlice in the tenant cluster
|
||||
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
|
||||
+ *createPort("http", 80, v1.ProtocolTCP),
|
||||
+ []discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
|
||||
+
|
||||
@@ -697,51 +697,43 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
|
||||
*createPort("http", 80, v1.ProtocolTCP),
|
||||
[]discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
|
||||
|
||||
- // Define several unique ports for the Service
|
||||
+ // Define multiple ports for the Service
|
||||
+ servicePorts := []v1.ServicePort{
|
||||
+ {
|
||||
servicePorts := []v1.ServicePort{
|
||||
{
|
||||
- Name: "client",
|
||||
- Protocol: v1.ProtocolTCP,
|
||||
- Port: 10001,
|
||||
- TargetPort: intstr.FromInt(30396),
|
||||
- NodePort: 30396,
|
||||
- AppProtocol: nil,
|
||||
+ Name: "client",
|
||||
+ Protocol: v1.ProtocolTCP,
|
||||
+ Port: 10001,
|
||||
+ TargetPort: intstr.FromInt(30396),
|
||||
+ NodePort: 30396,
|
||||
+ },
|
||||
+ {
|
||||
},
|
||||
{
|
||||
- Name: "dashboard",
|
||||
- Protocol: v1.ProtocolTCP,
|
||||
- Port: 8265,
|
||||
- TargetPort: intstr.FromInt(31003),
|
||||
- NodePort: 31003,
|
||||
- AppProtocol: nil,
|
||||
+ Name: "dashboard",
|
||||
+ Protocol: v1.ProtocolTCP,
|
||||
+ Port: 8265,
|
||||
+ TargetPort: intstr.FromInt(31003),
|
||||
+ NodePort: 31003,
|
||||
+ },
|
||||
+ {
|
||||
},
|
||||
{
|
||||
- Name: "metrics",
|
||||
- Protocol: v1.ProtocolTCP,
|
||||
- Port: 8080,
- TargetPort: intstr.FromInt(30452),
- NodePort: 30452,
- AppProtocol: nil,
+ Name: "metrics",
+ Protocol: v1.ProtocolTCP,
+ Port: 8080,
+ TargetPort: intstr.FromInt(30452),
+ NodePort: 30452,
+ },
+ }
+
+ createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
},
}

- // Create a Service with the first port
createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
- servicePorts[0],
- v1.ServiceExternalTrafficPolicyLocal)
+ servicePorts[0], v1.ServiceExternalTrafficPolicyLocal)
+
+ svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
+ Expect(err).To(BeNil())
+
+ svc.Spec.Ports = servicePorts
+ _, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
+ Expect(err).To(BeNil())
+
+ var epsListMultiPort *discoveryv1.EndpointSliceList
+
+ Eventually(func() (bool, error) {
+ epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+ if len(epsListMultiPort.Items) != 1 {
+ return false, err
+ }
+
+ createdSlice := epsListMultiPort.Items[0]
+ expectedPortNames := []string{"client", "dashboard", "metrics"}
+ foundPortNames := []string{}
+
+ for _, port := range createdSlice.Ports {
+ if port.Name != nil {
+ foundPortNames = append(foundPortNames, *port.Name)
+ }
+ }
+
+ if len(foundPortNames) != len(expectedPortNames) {
+ return false, err
+ }
+
+ portSet := sets.NewString(foundPortNames...)
+ expectedPortSet := sets.NewString(expectedPortNames...)
+ return portSet.Equal(expectedPortSet), err
+ }).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
+ })
+

- // Update the Service by adding the remaining ports
svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
Expect(err).To(BeNil())

svc.Spec.Ports = servicePorts
-
_, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
Expect(err).To(BeNil())

var epsListMultiPort *discoveryv1.EndpointSliceList

- // Verify that the EndpointSlice is created with correct unique ports
Eventually(func() (bool, error) {
epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
if len(epsListMultiPort.Items) != 1 {
@@ -758,7 +750,6 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
}
}

- // Verify that all expected ports are present and without duplicates
if len(foundPortNames) != len(expectedPortNames) {
return false, err
}
@@ -769,5 +760,156 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
}).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
})

+ g.It("Should not panic when Service changes to have a non-nil selector, causing EndpointSlice deletion with no new slices to create", func() {
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
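The multiport test above reduces to one invariant: after the Service gains its remaining ports, the reconciled EndpointSlice must carry exactly the expected port names, with no duplicates. A standalone sketch of that comparison, using the same apimachinery sets package the test imports (the helper name is ours, not the repository's):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// portNamesMatch reports whether the port names found on an EndpointSlice
// are exactly the expected set. The length check runs first because a set
// collapses duplicates: {"client", "client"} must not pass for
// {"client", "dashboard"}.
func portNamesMatch(found, expected []string) bool {
	if len(found) != len(expected) {
		return false
	}
	return sets.NewString(found...).Equal(sets.NewString(expected...))
}

func main() {
	fmt.Println(portNamesMatch(
		[]string{"client", "dashboard", "metrics"},
		[]string{"metrics", "client", "dashboard"})) // true: order is irrelevant
	fmt.Println(portNamesMatch(
		[]string{"client", "client", "metrics"},
		[]string{"client", "dashboard", "metrics"})) // false: duplicate "client"
}
```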
@@ -0,0 +1,142 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 53388eb8e..28644236f 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -12,7 +12,6 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -669,35 +668,50 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di
for _, slice := range tenantSlices {
for _, endpoint := range slice.Endpoints {
// find all unique nodes that correspond to an endpoint in a tenant slice
+ if endpoint.NodeName == nil {
+ klog.Warningf("Skipping endpoint without NodeName in slice %s/%s", slice.Namespace, slice.Name)
+ continue
+ }
nodeSet.Insert(*endpoint.NodeName)
}
}

- klog.Infof("Desired nodes for service %s in namespace %s: %v", service.Name, service.Namespace, sets.List(nodeSet))
+ klog.Infof("Desired nodes for service %s/%s: %v", service.Namespace, service.Name, sets.List(nodeSet))

for _, node := range sets.List(nodeSet) {
// find vmi for node name
- obj := &unstructured.Unstructured{}
- vmi := &kubevirtv1.VirtualMachineInstance{}
-
- obj, err := c.infraDynamic.Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).Namespace(c.infraNamespace).Get(context.TODO(), node, metav1.GetOptions{})
+ obj, err := c.infraDynamic.
+ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).
+ Namespace(c.infraNamespace).
+ Get(context.TODO(), node, metav1.GetOptions{})
if err != nil {
- klog.Errorf("Failed to get VirtualMachineInstance %s in namespace %s:%v", node, c.infraNamespace, err)
+ klog.Errorf("Failed to get VMI %q in namespace %q: %v", node, c.infraNamespace, err)
continue
}

+ vmi := &kubevirtv1.VirtualMachineInstance{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, vmi)
if err != nil {
klog.Errorf("Failed to convert Unstructured to VirtualMachineInstance: %v", err)
- klog.Fatal(err)
+ continue
}

+ if vmi.Status.NodeName == "" {
+ klog.Warningf("Skipping VMI %s/%s: NodeName is empty", vmi.Namespace, vmi.Name)
+ continue
+ }
+ nodeNamePtr := &vmi.Status.NodeName
+
ready := vmi.Status.Phase == kubevirtv1.Running
serving := vmi.Status.Phase == kubevirtv1.Running
terminating := vmi.Status.Phase == kubevirtv1.Failed || vmi.Status.Phase == kubevirtv1.Succeeded

for _, i := range vmi.Status.Interfaces {
if i.Name == "default" {
+ if i.IP == "" {
+ klog.Warningf("VMI %s/%s interface %q has no IP, skipping", vmi.Namespace, vmi.Name, i.Name)
+ continue
+ }
desiredEndpoints = append(desiredEndpoints, &discovery.Endpoint{
Addresses: []string{i.IP},
Conditions: discovery.EndpointConditions{
@@ -705,9 +719,9 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di
Serving: &serving,
Terminating: &terminating,
},
- NodeName: &vmi.Status.NodeName,
+ NodeName: nodeNamePtr,
})
- continue
+ break
}
}
}
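Taken together, the changes in this hunk replace a crash-prone path (`klog.Fatal` on conversion errors, an unconditional `*endpoint.NodeName` dereference) with per-item skips, and stop scanning interfaces once the `default` one has been handled (`continue` becomes `break`). A condensed, runnable sketch of the resulting control flow, with simplified stand-in types rather than the real KubeVirt API:

```go
package main

import "fmt"

type iface struct{ Name, IP string }

type vmiStatus struct {
	NodeName   string
	Interfaces []iface
}

type vmi struct{ Status vmiStatus }

type endpoint struct{ NodeName *string }

// getVMI is a stand-in for the dynamic-client lookup and conversion.
func getVMI(node string) (*vmi, error) {
	return &vmi{Status: vmiStatus{
		NodeName:   node,
		Interfaces: []iface{{Name: "default", IP: "10.0.0.5"}},
	}}, nil
}

// desiredAddresses mirrors the post-fix control flow: every questionable
// item is skipped with continue/break instead of crashing the controller.
func desiredAddresses(eps []endpoint) []string {
	var out []string
	for _, ep := range eps {
		if ep.NodeName == nil {
			continue // was an unconditional dereference before the fix
		}
		v, err := getVMI(*ep.NodeName)
		if err != nil {
			continue // log and move on; the old code could klog.Fatal here
		}
		if v.Status.NodeName == "" {
			continue // VMI exists but is not scheduled to a node yet
		}
		for _, i := range v.Status.Interfaces {
			if i.Name != "default" {
				continue
			}
			if i.IP == "" {
				break // default interface has no usable address
			}
			out = append(out, i.IP)
			break // one endpoint per VMI; this was `continue` before
		}
	}
	return out
}

func main() {
	node := "worker-0"
	fmt.Println(desiredAddresses([]endpoint{{NodeName: nil}, {NodeName: &node}}))
}
```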
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1c97035b4..d205d0bed 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -771,3 +771,55 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {

})
})
+
+var _ = g.Describe("getDesiredEndpoints", func() {
+ g.It("should skip endpoints without NodeName and VMIs without NodeName or IP", func() {
+ // Setup controller
+ ctrl := setupTestKubevirtEPSController().controller
+
+ // Manually inject dynamic client content (1 VMI with missing NodeName)
+ vmi := createUnstructuredVMINode("vmi-without-node", "", "10.0.0.1") // empty NodeName
+ _, err := ctrl.infraDynamic.
+ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).
+ Namespace(infraNamespace).
+ Create(context.TODO(), vmi, metav1.CreateOptions{})
+ Expect(err).To(BeNil())
+
+ // Create service
+ svc := createInfraServiceLB("test-svc", "test-svc", "test-cluster",
+ v1.ServicePort{
+ Name: "http",
+ Port: 80,
+ TargetPort: intstr.FromInt(8080),
+ Protocol: v1.ProtocolTCP,
+ },
+ v1.ServiceExternalTrafficPolicyLocal,
+ )
+
+ // One endpoint has nil NodeName, another is valid
+ nodeName := "vmi-without-node"
+ tenantSlice := &discoveryv1.EndpointSlice{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "slice",
+ Namespace: tenantNamespace,
+ Labels: map[string]string{
+ discoveryv1.LabelServiceName: "test-svc",
+ },
+ },
+ AddressType: discoveryv1.AddressTypeIPv4,
+ Endpoints: []discoveryv1.Endpoint{
+ { // should be skipped due to nil NodeName
+ Addresses: []string{"10.1.1.1"},
+ NodeName: nil,
+ },
+ { // will hit VMI without NodeName and also be skipped
+ Addresses: []string{"10.1.1.2"},
+ NodeName: &nodeName,
+ },
+ },
+ }
+
+ endpoints := ctrl.getDesiredEndpoints(svc, []*discoveryv1.EndpointSlice{tenantSlice})
+ Expect(endpoints).To(HaveLen(0), "Expected no endpoints due to missing NodeName or IP")
+ })
+})
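The test relies on a `createUnstructuredVMINode` helper whose definition is not shown in this diff. A plausible shape for it, building an unstructured VMI for the fake dynamic client, is sketched below; the field layout follows the KubeVirt VMI status (`nodeName`, `interfaces[].ipAddress`), but the name, signature, and namespace are assumptions:

```go
import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

// Hypothetical sketch of createUnstructuredVMINode. The real helper lives in
// the test suite and is not part of this diff, so everything here is an
// assumption modeled on the KubeVirt VMI schema.
func createUnstructuredVMINode(name, nodeName, ip string) *unstructured.Unstructured {
	return &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "kubevirt.io/v1",
			"kind":       "VirtualMachineInstance",
			"metadata": map[string]interface{}{
				"name":      name,
				"namespace": "kubevirt-csi", // stand-in for infraNamespace
			},
			"status": map[string]interface{}{
				"nodeName": nodeName, // "" models a VMI that is not scheduled
				"phase":    "Running",
				"interfaces": []interface{}{
					map[string]interface{}{"name": "default", "ipAddress": ip},
				},
			},
		},
	}
}
```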
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.21.0@sha256:b1525163cd21938ac934bb1b860f2f3151464fa463b82880ab058167aeaf3e29
+ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.0@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036

@@ -3,7 +3,7 @@ ARG builder_image=docker.io/library/golang:1.22.5
FROM ${builder_image} AS builder
RUN git clone https://github.com/kubevirt/csi-driver /src/kubevirt-csi-driver \
&& cd /src/kubevirt-csi-driver \
-&& git checkout 35836e0c8b68d9916d29a838ea60cdd3fc6199cf
+&& git checkout a8d6605bc9997bcfda3fb9f1f82ba6445b4984cc

ARG TARGETOS
ARG TARGETARCH
@@ -11,6 +11,7 @@ ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH

WORKDIR /src/kubevirt-csi-driver

RUN make build

FROM quay.io/centos/centos:stream9

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:bfe568db4b768a4b6c67a8d562892bbba766d0245e140d431754589b347f0b41
+ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:e53f2394c7aa76ad10818ffb945e40006cd77406999e47e036d41b8b0bf094cc

@@ -120,23 +120,11 @@ metadata:
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
spec:
apiServer:
{{- if .Values.controlPlane.apiServer.resources }}
resources: {{- toYaml .Values.controlPlane.apiServer.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.apiServer.resourcesPreset .Values.controlPlane.apiServer.resources $) | nindent 6 }}
controllerManager:
{{- if .Values.controlPlane.controllerManager.resources }}
resources: {{- toYaml .Values.controlPlane.controllerManager.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.controllerManager.resourcesPreset .Values.controlPlane.controllerManager.resources $) | nindent 6 }}
scheduler:
{{- if .Values.controlPlane.scheduler.resources }}
resources: {{- toYaml .Values.controlPlane.scheduler.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.scheduler.resourcesPreset .Values.controlPlane.scheduler.resources $) | nindent 6 }}
dataStoreName: "{{ $etcd }}"
addons:
coreDNS:
@@ -145,11 +133,7 @@ spec:
konnectivity:
server:
port: 8132
{{- if .Values.controlPlane.konnectivity.server.resources }}
resources: {{- toYaml .Values.controlPlane.konnectivity.server.resources | nindent 10 }}
{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.konnectivity.server.resourcesPreset .Values.controlPlane.konnectivity.server.resources $) | nindent 10 }}
kubelet:
cgroupfs: systemd
preferredAddressTypes:
@@ -211,12 +195,25 @@ spec:
- ["LABEL=ephemeral", "/ephemeral"]
- ["/ephemeral/kubelet", "/var/lib/kubelet", "none", "bind,nofail"]
- ["/ephemeral/containerd", "/var/lib/containerd", "none", "bind,nofail"]
{{- $sec := lookup "v1" "Secret" $.Release.Namespace (printf "%s-patch-containerd" $.Release.Name) }}
{{- if $sec }}
files:
{{- range $key, $_ := $sec.data }}
- path: /etc/containerd/certs.d/{{ trimSuffix ".toml" $key }}/hosts.toml
contentFrom:
secret:
name: {{ $.Release.Name }}-patch-containerd
key: {{ $key }}
permissions: "0400"
{{- end }}
{{- end }}
preKubeadmCommands:
- sed -i 's|root:x:|root::|' /etc/passwd
- systemctl stop containerd.service
- mkdir -p /ephemeral/kubelet /ephemeral/containerd
- mount -o bind /ephemeral/kubelet /var/lib/kubelet
- mount -o bind /ephemeral/containerd /var/lib/containerd
- sudo sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry\]/,/^\[/ s|^\(\s*config_path\s*=\s*\).*|\1"/etc/containerd/certs.d"|' /etc/containerd/config.toml
- systemctl start containerd.service
joinConfiguration:
nodeRegistration:

@@ -0,0 +1,13 @@
{{- $sourceSecret := lookup "v1" "Secret" "cozy-system" "patch-containerd" }}
{{- if $sourceSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-patch-containerd
namespace: {{ .Release.Namespace }}
type: {{ $sourceSecret.type }}
data:
{{- range $key, $value := $sourceSecret.data }}
{{ printf "%s: %s" $key ($value | quote) | indent 2 }}
{{- end }}
{{- end }}
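The template above uses Helm's `lookup` to copy the `patch-containerd` Secret from `cozy-system` into the release namespace at render time. For comparison, the same copy expressed imperatively with client-go; a sketch under the naming assumptions visible in the template, not code from this repository:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// copyPatchContainerdSecret mirrors what the Helm template does with lookup:
// read cozy-system/patch-containerd and, if it exists, create
// "<release>-patch-containerd" in the release namespace.
func copyPatchContainerdSecret(ctx context.Context, cs kubernetes.Interface, release, namespace string) error {
	src, err := cs.CoreV1().Secrets("cozy-system").Get(ctx, "patch-containerd", metav1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		return nil // nothing to copy, like the template's {{- if $sourceSecret }}
	}
	if err != nil {
		return err
	}
	dst := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-patch-containerd", release),
			Namespace: namespace,
		},
		Type: src.Type,
		Data: src.Data, // byte values carry over directly
	}
	_, err = cs.CoreV1().Secrets(namespace).Create(ctx, dst, metav1.CreateOptions{})
	return err
}
```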
@@ -34,3 +34,14 @@ rules:
- {{ $.Release.Name }}-{{ $groupName }}
{{- end }}
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io

@@ -1,3 +1,4 @@
{{- if .Values.addons.certManager.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
@@ -54,3 +55,4 @@ stringData:
values: |
{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
{{- end }}
{{- end }}

@@ -17,6 +17,7 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

@@ -3,9 +3,11 @@ ingress-nginx:
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
{{- if eq .Values.addons.ingressNginx.exposeMethod "Proxied" }}
hostNetwork: true
service:
enabled: false
{{- end }}
{{- if not .Values.addons.certManager.enabled }}
admissionWebhooks:
certManager:

@@ -24,8 +24,8 @@ spec:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
targetNamespace: cozy-monitoring-agents
storageNamespace: cozy-monitoring-agents
targetNamespace: cozy-monitoring
storageNamespace: cozy-monitoring
install:
createNamespace: true
timeout: "300s"

packages/apps/kubernetes/templates/helmreleases/velero.yaml (Normal file, 46 lines)
@@ -0,0 +1,46 @@
{{- if .Values.addons.velero.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-velero
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: velero
chart:
spec:
chart: cozy-velero
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
targetNamespace: cozy-velero
storageNamespace: cozy-velero
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
{{- with .Values.addons.velero.valuesOverride }}
values:
{{- toYaml . | nindent 4 }}
{{- end }}

dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,4 +1,6 @@
{{- define "cozystack.defaultVPAValues" -}}
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
vertical-pod-autoscaler:
@@ -13,7 +15,7 @@ vertical-pod-autoscaler:
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
pod-name-label: pod
pod-namespace-label: namespace
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.{{ $clusterDomain }}:8481/select/0/prometheus/
prometheus-cadvisor-job-name: cadvisor
resources:
limits:
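The two-line change above is the whole fix: the vmselect address is now assembled from the configured cluster domain instead of a hardcoded `cozy.local`, so clusters with a non-default domain resolve the monitoring endpoint correctly. The construction, sketched in Go:

```go
package main

import "fmt"

// vmselectURL mirrors the template logic: use the cluster domain from the
// cozystack ConfigMap, defaulting to "cozy.local" when it is unset.
func vmselectURL(targetTenant, clusterDomain string) string {
	if clusterDomain == "" {
		clusterDomain = "cozy.local"
	}
	return fmt.Sprintf(
		"http://vmselect-shortterm.%s.svc.%s:8481/select/0/prometheus/",
		targetTenant, clusterDomain)
}

func main() {
	fmt.Println(vmselectURL("tenant-root", ""))            // default domain
	fmt.Println(vmselectURL("tenant-root", "example.com")) // custom domain
}
```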
@@ -1,6 +1,6 @@
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $ingress := index $myNS.metadata.annotations "namespace.cozystack.io/ingress" }}
{{- if .Values.addons.ingressNginx.hosts }}
{{- if and (eq .Values.addons.ingressNginx.exposeMethod "Proxied") .Values.addons.ingressNginx.hosts }}
---
apiVersion: networking.k8s.io/v1
kind: Ingress

@@ -20,13 +20,13 @@
"properties": {
"resources": {
"type": "object",
"description": "Explicit CPU/memory resource requests and limits for the API server.",
"description": "Explicit CPU and memory configuration for the API Server. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.",
"default": "small",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "medium",
"enum": [
"none",
"nano",
@@ -45,12 +45,12 @@
"properties": {
"resources": {
"type": "object",
"description": "Explicit CPU/memory resource requests and limits for the controller manager.",
"description": "Explicit CPU and memory configuration for the Controller Manager. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "micro",
"enum": [
"none",
@@ -70,12 +70,12 @@
"properties": {
"resources": {
"type": "object",
"description": "Explicit CPU/memory resource requests and limits for the scheduler.",
"description": "Explicit CPU and memory configuration for the Scheduler. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "micro",
"enum": [
"none",
@@ -98,12 +98,12 @@
"properties": {
"resources": {
"type": "object",
"description": "Explicit CPU/memory resource requests and limits for the Konnectivity.",
"description": "Explicit CPU and memory configuration for Konnectivity. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "micro",
"enum": [
"none",
@@ -178,9 +178,18 @@
"description": "Custom values to override",
"default": {}
},
"exposeMethod": {
"type": "string",
"description": "Method to expose the Ingress-NGINX controller. (allowed values: Proxied, LoadBalancer)",
"default": "Proxied",
"enum": [
"Proxied",
"LoadBalancer"
]
},
"hosts": {
"type": "array",
"description": "List of domain names that the parent cluster should route to this tenant cluster.",
"description": "List of domain names that the parent cluster should route to this tenant cluster. Taken into account only when `exposeMethod` is set to `Proxied`.",
"default": [],
"items": {}
}
@@ -240,6 +249,21 @@
"default": {}
}
}
},
"velero": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable velero for backup and restore k8s cluster.",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
}
}
}

@@ -61,12 +61,14 @@ addons:
## @param addons.ingressNginx.valuesOverride Custom values to override
##
enabled: false
## @param addons.ingressNginx.hosts List of domain names that the parent cluster should route to this tenant cluster.
## @param addons.ingressNginx.exposeMethod Method to expose the Ingress-NGINX controller. (allowed values: Proxied, LoadBalancer)
## @param addons.ingressNginx.hosts List of domain names that the parent cluster should route to this tenant cluster. Taken into account only when `exposeMethod` is set to `Proxied`.
## e.g:
## hosts:
## - example.org
## - foo.example.net
##
exposeMethod: Proxied
hosts: []
valuesOverride: {}

@@ -103,6 +105,15 @@ addons:
##
valuesOverride: {}

## Velero
##
velero:
## @param addons.velero.enabled Enable velero for backup and restore k8s cluster.
## @param addons.velero.valuesOverride Custom values to override
##
enabled: false
valuesOverride: {}

## @section Kubernetes Control Plane Configuration
##

@@ -110,35 +121,31 @@ controlPlane:
replicas: 2

apiServer:
## @param controlPlane.apiServer.resources Explicit CPU/memory resource requests and limits for the API server.
## @param controlPlane.apiServer.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
## @param controlPlane.apiServer.resources Explicit CPU and memory configuration for the API Server. When left empty, the preset defined in `resourcesPreset` is applied.
## @param controlPlane.apiServer.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
## e.g:
## resources:
## limits:
## cpu: 4000m
## memory: 4Gi
## requests:
## cpu: 100m
## memory: 512Mi
## cpu: 4000m
## memory: 4Gi
##
resourcesPreset: "small"
resourcesPreset: "medium"
resources: {}

controllerManager:
## @param controlPlane.controllerManager.resources Explicit CPU/memory resource requests and limits for the controller manager.
## @param controlPlane.controllerManager.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
## @param controlPlane.controllerManager.resources Explicit CPU and memory configuration for the Controller Manager. When left empty, the preset defined in `resourcesPreset` is applied.
## @param controlPlane.controllerManager.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "micro"
resources: {}

scheduler:
## @param controlPlane.scheduler.resources Explicit CPU/memory resource requests and limits for the scheduler.
## @param controlPlane.scheduler.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
## @param controlPlane.scheduler.resources Explicit CPU and memory configuration for the Scheduler. When left empty, the preset defined in `resourcesPreset` is applied.
## @param controlPlane.scheduler.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "micro"
resources: {}

konnectivity:
server:
## @param controlPlane.konnectivity.server.resources Explicit CPU/memory resource requests and limits for the Konnectivity.
## @param controlPlane.konnectivity.server.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
## @param controlPlane.konnectivity.server.resources Explicit CPU and memory configuration for Konnectivity. When left empty, the preset defined in `resourcesPreset` is applied.
## @param controlPlane.konnectivity.server.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "micro"
resources: {}

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.7.0
+version: 0.9.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -5,6 +5,7 @@ include ../../../scripts/package.mk

generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

image:
docker buildx build images/mariadb-backup \

@@ -1,6 +1,7 @@
## Managed MariaDB Service

The Managed MariaDB Service offers a powerful and widely used relational database solution. This service allows you to create and manage a replicated MariaDB cluster seamlessly.
The Managed MariaDB Service offers a powerful and widely used relational database solution.
This service allows you to create and manage a replicated MariaDB cluster seamlessly.

## Deployment Details

@@ -46,7 +47,7 @@ restic -r s3:s3.example.org/mariadb-backups/database_name restore latest --targe
```

more details:
- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
- https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1

### Known issues

@@ -83,16 +84,66 @@ more details:

### Backup parameters

| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable pereiodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Explicit CPU and memory configuration for each MariaDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |

## Parameter examples and reference

### resources and resourcesPreset

`resources` sets explicit CPU and memory configurations for each replica.
When left empty, the preset defined in `resourcesPreset` is applied.

```yaml
resources:
cpu: 4000m
memory: 4Gi
```

`resourcesPreset` sets named CPU and memory configurations for each replica.
This setting is ignored if the corresponding `resources` value is set.

| Preset name | CPU | memory |
|-------------|--------|---------|
| `nano` | `250m` | `128Mi` |
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

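For illustration, the preset table above can be read as a simple lookup combined with the documented precedence rule: explicit `resources` win, otherwise `resourcesPreset` applies. A minimal Go sketch (the helper is hypothetical, not part of the chart):

```go
package main

import "fmt"

// presets encodes the table above: preset name -> {CPU, memory}.
var presets = map[string][2]string{
	"nano":    {"250m", "128Mi"},
	"micro":   {"500m", "256Mi"},
	"small":   {"1", "512Mi"},
	"medium":  {"1", "1Gi"},
	"large":   {"3", "2Gi"},
	"xlarge":  {"4", "4Gi"},
	"2xlarge": {"8", "8Gi"},
}

// effectiveResources applies the documented precedence: explicit resources
// win; otherwise the preset is used; "none" (absent from the map) yields nil.
func effectiveResources(preset string, explicit map[string]string) map[string]string {
	if len(explicit) > 0 {
		return explicit
	}
	p, ok := presets[preset]
	if !ok {
		return nil
	}
	return map[string]string{"cpu": p[0], "memory": p[1]}
}

func main() {
	fmt.Println(effectiveResources("nano", nil)) // preset applies
	fmt.Println(effectiveResources("nano",
		map[string]string{"cpu": "4000m", "memory": "4Gi"})) // explicit wins
	fmt.Println(effectiveResources("none", nil)) // nil: no resources set
}
```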
### users

```yaml
users:
user1:
maxUserConnections: 1000
password: hackme
user2:
maxUserConnections: 1000
password: hackme
```

### databases

```yaml
databases:
myapp1:
roles:
admin:
- user1
readonly:
- user2
```

packages/apps/mysql/charts/cozy-lib (Symbolic link, 1 line)
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/mariadb-backup:0.7.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
+ghcr.io/cozystack/cozystack/mariadb-backup:0.9.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4

@@ -25,3 +25,14 @@ rules:
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io

@@ -57,6 +57,13 @@ spec:
name: {{ .Release.Name }}-my-cnf
key: config

service:
metadata:
labels:
app.kubernetes.io/instance: {{ $.Release.Name }}
{{- if and .Values.external (eq (int .Values.replicas) 1) }}
type: LoadBalancer
{{- end }}
storage:
size: {{ .Values.size }}
resizeInUseVolumes: true
@@ -65,7 +72,7 @@ spec:
storageClassName: {{ . }}
{{- end }}

{{- if .Values.external }}
{{- if and .Values.external (gt (int .Values.replicas) 1) }}
primaryService:
type: LoadBalancer
{{- end }}
@@ -73,8 +80,4 @@ spec:
#secondaryService:
# type: LoadBalancer

{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 4 }}
{{- else if ne .Values.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
{{- end }}
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}

@@ -27,7 +27,7 @@
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable pereiodic backups",
"description": "Enable periodic backups",
"default": false
},
"s3Region": {
@@ -69,13 +69,23 @@
},
"resources": {
"type": "object",
"description": "Resources",
"description": "Explicit CPU and memory configuration for each MariaDB replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "nano",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
}
}

Some files were not shown because too many files have changed in this diff.