Mirror of https://github.com/outbackdingo/cozystack.git, synced 2026-01-28 18:18:41 +00:00

Compare commits: v0.31.0-rc ... v0.32.0 (254 commits)
| SHA1 |
|---|
| 3ce6dbe850 |
| 8d5007919f |
| 08e569918b |
| 6498000721 |
| 8486e6b3aa |
| 3f6b6798f4 |
| c1b928b8ef |
| c2e8fba483 |
| 62cb694d72 |
| c619343aa2 |
| 75ad26989d |
| c4fc8c18df |
| 8663dc940f |
| cf983a8f9c |
| ad6aa0ca94 |
| 9dc5d62f47 |
| 3b8a9f9d2c |
| ab9926a177 |
| f83741eb09 |
| 028f2e4e8d |
| 255fa8cbe1 |
| b42f5cdc01 |
| 74633ad699 |
| 980185ca2b |
| 8eabe30548 |
| 0c9c688e6d |
| 908c75927e |
| 0a1f078384 |
| 6a713e5eb4 |
| 8f0a28bad5 |
| 0fa70d9d38 |
| b14c82d606 |
| 8e79f24c5b |
| 3266a5514e |
| 0c37323a15 |
| 10af98e158 |
| 632224a30a |
| e8d11e64a6 |
| 27c7a2feb5 |
| 9555386bd7 |
| 9733de38a3 |
| 775a05cc3a |
| 4e5cc2ae61 |
| 32adf5ab38 |
| 28302e776e |
| 911ca64de0 |
| 045ea76539 |
| cee820e82c |
| 6183b715b7 |
| 2669ab6072 |
| 96506c7cce |
| 7bb70c839e |
| ba97a4593c |
| c467ed798a |
| ed881f0741 |
| 0e0dabdd08 |
| bd8f8bde95 |
| 646dab497c |
| dc3b61d164 |
| 4479a038cd |
| dfd01ff118 |
| d2bb66db31 |
| 7af97e2d9f |
| ac5145be87 |
| 4779db2dda |
| 25c2774bc8 |
| bbee8103eb |
| 730ea4d5ef |
| 13fccdc465 |
| f1b66c80f6 |
| f34f140d49 |
| 520fbfb2e4 |
| 25016580c1 |
| f10f8455fc |
| 974581d39b |
| 7e24297913 |
| b6142cd4f5 |
| e87994c769 |
| b140f1b57f |
| 64936021d2 |
| a887e19e6c |
| 92b97a569e |
| 0e22358b30 |
| 7429daf99c |
| b470b82e2a |
| a0700e7399 |
| 228e1983bc |
| 7023abdba7 |
| 1b43a5f160 |
| 20f4066c16 |
| ea0dd68e84 |
| e0c3d2324f |
| cb303d694c |
| 6130f43d06 |
| 4db55ac5eb |
| bfd20a5e0e |
| 977141bed3 |
| c4f8d6a251 |
| 9633ca4d25 |
| f798cbd9f9 |
| cf87779f7b |
| c69135e0e5 |
| a9c3a4c601 |
| d1081c86b3 |
| beadc80778 |
| 5bbb5a6266 |
| 0664370218 |
| 225d103509 |
| 0e22e3c12c |
| 7b8e7e40ce |
| c941e487fb |
| 8386e985f2 |
| e4c944488f |
| 99a7754c00 |
| 6cbfab9b2a |
| 461f756c88 |
| 50932ba49e |
| f9f8bb2f11 |
| 2ae8f2aa19 |
| 1a872ca95c |
| 3e379e9697 |
| 7746974644 |
| d989a8865d |
| 4aad0fc8f2 |
| 0e5ac5ed7c |
| c267c7eb9a |
| 7792e29065 |
| d35ff17de8 |
| 3a7d4c24ee |
| ff2638ef66 |
| bc294a0fe6 |
| bf5bccb7d9 |
| f00364037e |
| e83bf379ba |
| ae0549f78b |
| 74e7e5cdfb |
| 2bf4032d5b |
| ee1763cb85 |
| d497be9e95 |
| 6176a18a12 |
| 5789f12f3f |
| 6279873a35 |
| 79c441acb7 |
| 7864811016 |
| 13938f34fd |
| 9fb6b41e03 |
| a8ba6b1328 |
| 9592f7fe46 |
| 119d582379 |
| 451267310b |
| 0fee3f280b |
| 2461fcd531 |
| 866b6e0a5a |
| 1dccf96506 |
| bca27dcfdc |
| 5407ee01ee |
| fc8b52d73d |
| 609e7ede86 |
| 289853e661 |
| 013e1936ec |
| 893818f64e |
| 56bdaae2c9 |
| 775ecb7b11 |
| 8d74a35e9c |
| b753fd9fa8 |
| 9698f3c9f4 |
| 31b110cd39 |
| b4da00f96f |
| 0369852035 |
| 115497b73f |
| 4f78b133c2 |
| d550a67f19 |
| 8e6941dfbd |
| c54567ab45 |
| dd592ca676 |
| 5273722769 |
| fb26e3e9b7 |
| 5e0b0167fc |
| 73fdc5ded7 |
| 5fe7b3bf16 |
| 4ecf492cd4 |
| c42a50229f |
| 6f55a66328 |
| 9d551cc69b |
| 93b8dbb9ab |
| 8ad010d331 |
| 404579c361 |
| f8210cf276 |
| 545e256695 |
| e9c463c867 |
| 798ca12e43 |
| 3780925a68 |
| a240c0b6ed |
| de1b38c64b |
| 15d7b6d99e |
| 9377f55000 |
| d002879b0b |
| 2c6338a2ef |
| fd72d7c486 |
| db34f31175 |
| 653e2bc774 |
| 31ea5eeeb2 |
| 4a2c67e045 |
| 68fb7570f7 |
| 56fc08fab4 |
| b00ba53171 |
| 4dd52290ea |
| 492aff5265 |
| 395cdc3af1 |
| e6f3000b3c |
| e21c38c103 |
| 7a7512da30 |
| 58b5f6610d |
| e81053f7dd |
| 424aab4a83 |
| 77e6db3381 |
| f6e3188ab8 |
| 1ca0594060 |
| ac59b4540b |
| d0bd4b1329 |
| ccbcaf6331 |
| 1ad1b15a5b |
| 2349ff61c1 |
| 13139dd71d |
| 57ac614865 |
| bbb93c647d |
| 951ba75d93 |
| 15c9c4a068 |
| 57fefde732 |
| b4a04df6f3 |
| 1e63b5e8ce |
| 6ad30915eb |
| 557ffa536f |
| ae05d2f545 |
| 563c643813 |
| 68c85ac9ef |
| 3ac00ea4ec |
| 29b49496f2 |
| 3c27192d3e |
| dca732cde0 |
| 0346dc05bb |
| a03cdeff04 |
| 062d72805a |
| 70fed8148d |
| 12c6df83f5 |
| f61a7817e6 |
| c482289b14 |
| 1e59e5fbb6 |
| 6106a9fe51 |
| ec9e26c054 |
| 108fc647ea |
| a9b235048d |
| e1c14619d2 |
| f644bf20c5 |
.github/workflows/pre-commit.yml (vendored): 9 lines changed
@@ -3,8 +3,6 @@ name: Pre-Commit Checks
on:
pull_request:
types: [labeled, opened, synchronize, reopened]
paths-ignore:
- '**.md'

concurrency:
group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
@@ -32,12 +30,13 @@ jobs:
run: |
sudo apt update
sudo apt install curl -y
curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
sudo apt install nodejs -y
git clone https://github.com/bitnami/readme-generator-for-helm
sudo apt install npm -y

git clone --branch 2.7.0 --depth 1 https://github.com/bitnami/readme-generator-for-helm.git
cd ./readme-generator-for-helm
npm install
npm install -g pkg
npm install -g @yao-pkg/pkg
pkg . -o /usr/local/bin/readme-generator

- name: Run pre-commit hooks
.github/workflows/pull-requests-release.yaml (vendored): 121 lines changed
@@ -3,11 +3,6 @@ name: Releasing PR
on:
pull_request:
types: [labeled, opened, synchronize, reopened, closed]
workflow_dispatch:
inputs:
sha:
description: "Commit SHA to test"
required: true

concurrency:
group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
@@ -21,19 +16,14 @@ jobs:
contents: read
packages: write

# Run only when the PR carries the "release" label and not closed or via workflow_dispatch
if: |
(github.event_name == 'pull_request' &&
contains(github.event.pull_request.labels.*.name, 'release') &&
github.event.action != 'closed')
|| github.event_name == 'workflow_dispatch'
contains(github.event.pull_request.labels.*.name, 'release') &&
github.event.action != 'closed'

steps:
- name: Checkout code
uses: actions/checkout@v4
with:
# for workflow_dispatch take a specific SHA, otherwise — head.sha PR
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || inputs.sha }}
fetch-depth: 0
fetch-tags: true

@@ -44,6 +34,64 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io

- name: Extract tag from PR branch
id: get_tag
uses: actions/github-script@v7
with:
script: |
const branch = context.payload.pull_request.head.ref;
const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
if (!m) {
core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
return;
}
const tag = `v${m[1]}`;
core.setOutput('tag', tag);

- name: Find draft release and get asset IDs
id: fetch_assets
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GH_PAT }}
script: |
const tag = '${{ steps.get_tag.outputs.tag }}';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo,
per_page: 100
});
const draft = releases.data.find(r => r.tag_name === tag && r.draft);
if (!draft) {
core.setFailed(`Draft release '${tag}' not found`);
return;
}
const findAssetId = (name) =>
draft.assets.find(a => a.name === name)?.id;
const installerId = findAssetId("cozystack-installer.yaml");
const diskId = findAssetId("nocloud-amd64.raw.xz");
if (!installerId || !diskId) {
core.setFailed("Missing required assets");
return;
}
core.setOutput("installer_id", installerId);
core.setOutput("disk_id", diskId);

- name: Download assets from GitHub API
run: |
mkdir -p _out/assets
curl -sSL \
-H "Authorization: token ${GH_PAT}" \
-H "Accept: application/octet-stream" \
-o _out/assets/cozystack-installer.yaml \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.installer_id }}"
curl -sSL \
-H "Authorization: token ${GH_PAT}" \
-H "Accept: application/octet-stream" \
-o _out/assets/nocloud-amd64.raw.xz \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.disk_id }}"
env:
GH_PAT: ${{ secrets.GH_PAT }}

- name: Run tests
run: make test

@@ -89,6 +137,7 @@ jobs:
- name: Ensure maintenance branch release-X.Y
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GH_PAT }}
script: |
const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
@@ -98,21 +147,45 @@ jobs:
}
const line = `${match[1]}.${match[2]}`;
const branch = `release-${line}`;

// Get main branch commit for the tag
const ref = await github.rest.git.getRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: `tags/${tag}`
});

const commitSha = ref.data.object.sha;

try {
await github.rest.repos.getBranch({
owner: context.repo.owner,
repo: context.repo.repo,
branch
});
console.log(`Branch '${branch}' already exists`);
} catch (_) {
await github.rest.git.createRef({

await github.rest.git.updateRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: `refs/heads/${branch}`,
sha: context.sha
repo: context.repo.repo,
ref: `heads/${branch}`,
sha: commitSha,
force: true
});
console.log(`✅ Branch '${branch}' created at ${context.sha}`);
console.log(`🔁 Force-updated '${branch}' to ${commitSha}`);
} catch (err) {
if (err.status === 404) {
await github.rest.git.createRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: `refs/heads/${branch}`,
sha: commitSha
});
console.log(`✅ Created branch '${branch}' at ${commitSha}`);
} else {
console.error('Unexpected error --', err);
core.setFailed(`Unexpected error creating/updating branch: ${err.message}`);
throw err;
}
}

# Get the latest published release
@@ -145,13 +218,13 @@ jobs:
uses: actions/github-script@v7
with:
script: |
const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc1
const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
const m = tag.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
if (!m) {
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
return;
}
const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc1
const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
const isRc = Boolean(m[2]);
core.setOutput('is_rc', isRc);
const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
.github/workflows/pull-requests.yaml (vendored): 45 lines changed
@@ -9,8 +9,8 @@ concurrency:
cancel-in-progress: true

jobs:
e2e:
name: Build and Test
build:
name: Build
runs-on: [self-hosted]
permissions:
contents: read
@@ -33,9 +33,50 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
env:
DOCKER_CONFIG: ${{ runner.temp }}/.docker

- name: Build
run: make build
env:
DOCKER_CONFIG: ${{ runner.temp }}/.docker

- name: Build Talos image
run: make -C packages/core/installer talos-nocloud

- name: Upload installer
uses: actions/upload-artifact@v4
with:
name: cozystack-installer
path: _out/assets/cozystack-installer.yaml

- name: Upload Talos image
uses: actions/upload-artifact@v4
with:
name: talos-image
path: _out/assets/nocloud-amd64.raw.xz

test:
name: Test
runs-on: [self-hosted]
needs: build

# Never run when the PR carries the "release" label.
if: |
!contains(github.event.pull_request.labels.*.name, 'release')

steps:
- name: Download installer
uses: actions/download-artifact@v4
with:
name: cozystack-installer
path: _out/assets/

- name: Download Talos image
uses: actions/download-artifact@v4
with:
name: talos-image
path: _out/assets/

- name: Test
run: make test
.github/workflows/tags.yaml (vendored): 47 lines changed
@@ -3,9 +3,10 @@ name: Versioned Tag
on:
push:
tags:
- 'v*.*.*' # vX.Y.Z
- 'v*.*.*-rc.*' # vX.Y.Z-rc.N

- 'v*.*.*' # vX.Y.Z
- 'v*.*.*-rc.*' # vX.Y.Z-rc.N
- 'v*.*.*-beta.*' # vX.Y.Z-beta.N
- 'v*.*.*-alpha.*' # vX.Y.Z-alpha.N

concurrency:
group: tags-${{ github.workflow }}-${{ github.ref }}
@@ -42,7 +43,7 @@ jobs:
if: steps.check_release.outputs.skip == 'true'
run: echo "Release already exists, skipping workflow."

# Parse tag meta‑data (rc?, maintenance line, etc.)
# Parse tag meta-data (rc?, maintenance line, etc.)
- name: Parse tag
if: steps.check_release.outputs.skip == 'false'
id: tag
@@ -50,12 +51,12 @@ jobs:
with:
script: |
const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/); // ['0.31.5', '-rc.1']
const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/); // ['0.31.5', '-rc.1' | '-beta.1' | …]
if (!m) {
core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
return;
}
const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc.1
const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
const isRc = Boolean(m[2]);
const [maj, min] = m[1].split('.');
core.setOutput('tag', ref); // v0.31.5-rc.1
@@ -63,7 +64,7 @@ jobs:
core.setOutput('is_rc', isRc); // true
core.setOutput('line', `${maj}.${min}`); // 0.31

# Detect base branch (main or release‑X.Y) the tag was pushed from
# Detect base branch (main or release-X.Y) the tag was pushed from
- name: Get base branch
if: steps.check_release.outputs.skip == 'false'
id: get_base
@@ -98,11 +99,15 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
env:
DOCKER_CONFIG: ${{ runner.temp }}/.docker

# Build project artifacts
- name: Build
if: steps.check_release.outputs.skip == 'false'
run: make build
env:
DOCKER_CONFIG: ${{ runner.temp }}/.docker

# Commit built artifacts
- name: Commit release artifacts
@@ -168,7 +173,7 @@ jobs:
});
console.log(`Draft release created for ${tag}`);
} else {
console.log(`Re‑using existing release ${tag}`);
console.log(`Re-using existing release ${tag}`);
}
core.setOutput('upload_url', rel.upload_url);

@@ -181,7 +186,7 @@ jobs:
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

# Create release‑X.Y.Z branch and push (force‑update)
# Create release-X.Y.Z branch and push (force-update)
- name: Create release branch
if: steps.check_release.outputs.skip == 'false'
run: |
@@ -225,25 +230,3 @@ jobs:
} else {
console.log(`PR already exists from ${head} to ${base}`);
}

# Run tests
- name: Trigger release-verify tests
if: steps.check_release.outputs.skip == 'false'
uses: actions/github-script@v7
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DISPATCH_REF: ${{ steps.get_base.outputs.branch }} # main or release-X.Y
DISPATCH_SHA: ${{ github.sha }} # commit for tests
with:
github-token: ${{ env.GH_TOKEN }}
script: |
await github.rest.actions.createWorkflowDispatch({
owner: context.repo.owner,
repo: context.repo.repo,
workflow_id: 'pull-requests-release.yaml',
ref: process.env.DISPATCH_REF,
inputs: {
sha: process.env.DISPATCH_SHA
}
});
console.log(`🔔 verify-job triggered on ${process.env.DISPATCH_SHA}`);
.gitignore (vendored): 3 lines changed
@@ -1,6 +1,7 @@
_out
.git
.idea
.vscode

# User-specific stuff
.idea/**/workspace.xml
@@ -75,4 +76,4 @@ fabric.properties
.idea/caches/build_file_checksums.ser

.DS_Store
**/.DS_Store

@@ -18,6 +18,7 @@ repos:
(cd "$dir" && make generate)
fi
done
git diff --color=always | cat
'
language: script
files: ^.*$
Makefile: 3 lines changed
@@ -20,6 +20,7 @@ build: build-deps
make -C packages/system/kubeovn image
make -C packages/system/kubeovn-webhook image
make -C packages/system/dashboard image
make -C packages/system/metallb image
make -C packages/system/kamaji image
make -C packages/system/bucket image
make -C packages/core/testing image
@@ -42,7 +43,7 @@ manifests:
(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml

assets:
make -C packages/core/installer/ assets
make -C packages/core/installer assets

test:
make -C packages/core/testing apply
README.md: 12 lines changed
@@ -12,11 +12,15 @@

**Cozystack** is a free PaaS platform and framework for building clouds.

Cozystack is a [CNCF Sandbox Level Project](https://www.cncf.io/sandbox-projects/) that was originally built and sponsored by [Ænix](https://aenix.io/).

With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.

Use Cozystack to build your own cloud or provide a cost-effective development environment.



## Use-Cases

* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
@@ -28,9 +32,6 @@ You can use Cozystack as a platform to build a private cloud powered by Infrastr
* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
You can use Cozystack as a Kubernetes distribution for Bare Metal

## Screenshot



## Documentation

@@ -59,7 +60,10 @@ Commits are used to generate the changelog, and their author will be referenced

If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).

You are welcome to join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
## Community

You are welcome to join our [Telegram group](https://t.me/cozystack) and come to our weekly community meetings.
Add them to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics) for convenience.

## License
@@ -39,6 +39,8 @@ import (
cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
"github.com/cozystack/cozystack/internal/controller"
"github.com/cozystack/cozystack/internal/telemetry"

helmv2 "github.com/fluxcd/helm-controller/api/v2"
// +kubebuilder:scaffold:imports
)

@@ -51,6 +53,7 @@ func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))

utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
utilruntime.Must(helmv2.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}

@@ -183,7 +186,23 @@ func main() {
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Workload")
setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
os.Exit(1)
}

if err = (&controller.TenantHelmReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "TenantHelmReconciler")
os.Exit(1)
}

if err = (&controller.CozystackConfigReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
os.Exit(1)
}
docs/changelogs/v0.31.0.md (new file): 243 lines
@@ -0,0 +1,243 @@
Cozystack v0.31.0 is a significant release that brings new features, key fixes, and updates to underlying components.
This version enhances GPU support, improves many components of Cozystack, and introduces a more robust release process to improve stability.
Below, we'll go over the highlights in each area for current users, developers, and our community.

## Major Features and Improvements

### GPU support for tenant Kubernetes clusters

Cozystack now integrates NVIDIA GPU Operator support for tenant Kubernetes clusters.
This enables platform users to run GPU-powered AI/ML applications in their own clusters.
To enable GPU Operator, set `addons.gpuOperator.enabled: true` in the cluster configuration.
(@kvaps in https://github.com/cozystack/cozystack/pull/834)
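
As a minimal sketch, following the `Kubernetes` resource shape used in `hack/e2e-apps.bats` later in this comparison (the metadata names are illustrative), enabling the addon looks like this:

```yaml
# Sketch: enabling the NVIDIA GPU Operator addon for a tenant Kubernetes cluster.
# The toggle is addons.gpuOperator.enabled; name and namespace are examples.
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test
  namespace: tenant-test
spec:
  addons:
    gpuOperator:
      enabled: true
      valuesOverride: {}
```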

Check out Andrei Kvapil's CNCF webinar [showcasing the GPU support by running Stable Diffusion in Cozystack](https://www.youtube.com/watch?v=S__h_QaoYEk).

<!--
* [kubernetes] Introduce GPU support for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/834)
-->

### Cilium Improvements

Cozystack’s Cilium integration received two significant enhancements.
First, Gateway API support in Cilium is now enabled, allowing advanced L4/L7 routing features via Kubernetes Gateway API.
We thank Zdenek Janda @zdenekjanda for contributing this feature in https://github.com/cozystack/cozystack/pull/924.

Second, Cozystack now permits custom user-provided parameters in the tenant cluster’s Cilium configuration.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
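
Such parameters go under the addon's `valuesOverride` field in the tenant cluster spec. A sketch (the Hubble keys below are just one example of upstream Cilium Helm values, not a Cozystack default):

```yaml
# Sketch: user-provided Cilium parameters for a tenant cluster.
# Any upstream Cilium Helm values can be placed under valuesOverride.
spec:
  addons:
    cilium:
      valuesOverride:
        hubble:
          relay:
            enabled: true   # example value
```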

<!--
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
-->

### Cross-Architecture Builds (ARM Support Beta)

Cozystack's build system was refactored to support multi-architecture binaries and container images.
This paves the road to running Cozystack on ARM64 servers.
Changes include Makefile improvements (https://github.com/cozystack/cozystack/pull/907)
and multi-arch Docker image builds (https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970).

We thank Nikita Bykov @nbykov0 for his ongoing work on ARM support!

<!--
* Introduce support for cross-architecture builds and Cozystack on ARM:
  * [build] Refactor Makefiles introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
  * [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
-->

### VerticalPodAutoscaler (VPA) Expansion

The VerticalPodAutoscaler is now enabled for more Cozystack components to automate resource tuning.
Specifically, VPA was added for tenant Kubernetes control planes (@klinch0 in https://github.com/cozystack/cozystack/pull/806),
the Cozystack Dashboard (https://github.com/cozystack/cozystack/pull/828),
and the Cozystack etcd-operator (https://github.com/cozystack/cozystack/pull/850).
All Cozystack components that have VPA enabled can automatically adjust their CPU and memory requests based on usage, improving platform and application stability.
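
For readers unfamiliar with VPA, a minimal sketch of such an object using the generic upstream `autoscaling.k8s.io/v1` API (the target is illustrative, not the exact manifest Cozystack ships):

```yaml
# Sketch: a VerticalPodAutoscaler that adjusts a component's resource requests.
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: dashboard            # illustrative name
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: dashboard          # the workload whose requests are tuned
  updatePolicy:
    updateMode: "Auto"       # apply recommendations automatically
```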

<!--
* Add VerticalPodAutoscaler to a few more components:
  * [kubernetes] Kubernetes clusters in user tenants. (@klinch0 in https://github.com/cozystack/cozystack/pull/806)
  * [platform] Cozystack dashboard. (@klinch0 in https://github.com/cozystack/cozystack/pull/828)
  * [platform] Cozystack etcd-operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/850)
-->

### Tenant HelmRelease Reconcile Controller

A new controller was introduced to monitor and synchronize HelmRelease resources across tenants.
This controller propagates configuration changes to tenant workloads and ensures that any HelmRelease defined in a tenant
stays in sync with platform updates.
It improves the reliability of deploying managed applications in Cozystack.
(@klinch0 in https://github.com/cozystack/cozystack/pull/870)

<!--
* [platform] Introduce a new controller to synchronize tenant HelmReleases and propagate configuration changes. (@klinch0 in https://github.com/cozystack/cozystack/pull/870)
-->

### Virtual Machine Improvements

**Configurable KubeVirt CPU Overcommit**: The CPU allocation ratio in KubeVirt (how virtual CPUs are overcommitted relative to physical) is now configurable
via the `cpu-allocation-ratio` value in the Cozystack configmap.
This means Cozystack administrators can now tune CPU overcommitment for VMs to balance performance vs. density.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
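
As a sketch, assuming the standard `cozystack` ConfigMap in the `cozy-system` namespace (the ratio value is an example):

```yaml
# Sketch: tuning KubeVirt CPU overcommit through the Cozystack configmap.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  cpu-allocation-ratio: "10"   # example: 10 vCPUs per physical CPU
```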

**KubeVirt VM Export**: Cozystack now allows exporting KubeVirt virtual machines.
This feature, enabled via KubeVirt's `VirtualMachineExport` capability, lets users snapshot or back up VM images.
(@kvaps in https://github.com/cozystack/cozystack/pull/808)
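
A minimal sketch of an export request using the upstream KubeVirt API (the API version shown may differ between KubeVirt releases, and the VM name is illustrative):

```yaml
# Sketch: exporting a VM via KubeVirt's VirtualMachineExport capability.
apiVersion: export.kubevirt.io/v1beta1
kind: VirtualMachineExport
metadata:
  name: export-my-vm
spec:
  source:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: my-vm
```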

**Support for various storage classes in Virtual Machines**: The `virtual-machine` application (since version 0.9.2) lets you pick any StorageClass for a VM's
system disk instead of relying on a hard-coded PVC.
Refer to values `systemDisk.storage` and `systemDisk.storageClass` in the [application's configs](https://cozystack.io/docs/reference/applications/virtual-machine/#common-parameters).
(@kvaps in https://github.com/cozystack/cozystack/pull/974)
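
In values form, a sketch (the storage class name is an example; use any StorageClass available in your cluster):

```yaml
# Sketch: selecting size and storage class for the VM system disk
# in the virtual-machine application's values (version 0.9.2 or later).
systemDisk:
  storage: 20Gi
  storageClass: replicated   # example name
```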

<!--
* [kubevirt] Enable exporting VMs. (@kvaps in https://github.com/cozystack/cozystack/pull/808)
* [kubevirt] Make KubeVirt's CPU allocation ratio configurable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
* [virtual-machine] Add support for various storages. (@kvaps in https://github.com/cozystack/cozystack/pull/974)
-->

### Other Features and Improvements

* [platform] Introduce options `expose-services`, `expose-ingress`, and `expose-external-ips` to the ingress service. (@kvaps in https://github.com/cozystack/cozystack/pull/929)
* [cozystack-controller] Record the IP address pool and storage class in Workload objects. (@lllamnyp in https://github.com/cozystack/cozystack/pull/831)
* [apps] Remove user-facing config of limits and requests. (@lllamnyp in https://github.com/cozystack/cozystack/pull/935)

## New Release Lifecycle

The Cozystack release lifecycle is changing to provide a more stable and predictable upgrade path for customers running Cozystack in mission-critical environments.

* **Gradual Release with Alpha, Beta, and Release Candidates**: Cozystack will now publish pre-release versions (alpha, beta, release candidates) before a stable release.
Starting with v0.31.0, the team published three release candidates before tagging the stable release.
This allows more testing and feedback before marking a release as stable.

* **Prolonged Release Support with Patch Versions**: After the initial `vX.Y.0` release, a long-lived branch `release-X.Y` will be created to backport fixes.
For example, with 0.31.0’s release, a `release-0.31` branch will track patch fixes (`0.31.x`).
This strategy lets Cozystack users receive timely patch releases and updates with minimal risks.

To implement these new changes, we have rebuilt our CI/CD workflows and introduced automation, enabling automatic backports.
You can read more about how it's implemented in the Development section below.

For more information, read the [Cozystack Release Workflow](https://github.com/cozystack/cozystack/blob/main/docs/release.md) documentation.

## Fixes

* [virtual-machine] Add GPU names to the virtual machine specifications. (@kvaps in https://github.com/cozystack/cozystack/pull/862)
* [virtual-machine] Count Workload resources for pods by requests, not limits. Other improvements to VM resource tracking. (@lllamnyp in https://github.com/cozystack/cozystack/pull/904)
* [virtual-machine] Set PortList method by default. (@kvaps in https://github.com/cozystack/cozystack/pull/996)
* [virtual-machine] Specify ports even for wholeIP mode. (@kvaps in https://github.com/cozystack/cozystack/pull/1000)
* [platform] Fix installing HelmReleases on initial setup. (@kvaps in https://github.com/cozystack/cozystack/pull/833)
* [platform] Migration scripts update Kubernetes ConfigMap with the current stack version for improved version tracking. (@klinch0 in https://github.com/cozystack/cozystack/pull/840)
* [platform] Reduce requested CPU and RAM for the `kamaji` provider. (@klinch0 in https://github.com/cozystack/cozystack/pull/825)
* [platform] Improve the reconciliation loop for the Cozystack system HelmReleases logic. (@klinch0 in https://github.com/cozystack/cozystack/pull/809 and https://github.com/cozystack/cozystack/pull/810, @kvaps in https://github.com/cozystack/cozystack/pull/811)
* [platform] Remove extra dependencies for the Piraeus operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/856)
* [platform] Refactor dashboard values. (@kvaps in https://github.com/cozystack/cozystack/pull/928, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/952)
* [platform] Make FluxCD artifact disabled by default. (@klinch0 in https://github.com/cozystack/cozystack/pull/964)
* [kubernetes] Update garbage collection of HelmReleases in tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/835)
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)
* [kubernetes] Fix `ubuntu-container-disk` tag. (@kvaps in https://github.com/cozystack/cozystack/pull/887)
* [kubernetes] Refactor Helm manifests for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/866)
* [kubernetes] Fix Ingress-NGINX dependency on Cert-Manager. (@kvaps in https://github.com/cozystack/cozystack/pull/976)
* [kubernetes, apps] Enable `topologySpreadConstraints` for tenant Kubernetes clusters and fix it for managed PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/995)
* [tenant] Fix an issue with accessing external IPs of a cluster from the cluster itself. (@kvaps in https://github.com/cozystack/cozystack/pull/854)
* [cluster-api] Remove the no longer necessary workaround for Kamaji. (@kvaps in https://github.com/cozystack/cozystack/pull/867, patched in https://github.com/cozystack/cozystack/pull/956)
* [monitoring] Remove legacy label "POD" from the exclude filter in metrics. (@xy2 in https://github.com/cozystack/cozystack/pull/826)
* [monitoring] Refactor management etcd monitoring config. Introduce a migration script for updating monitoring resources (`kube-rbac-proxy` daemonset). (@lllamnyp in https://github.com/cozystack/cozystack/pull/799 and https://github.com/cozystack/cozystack/pull/830)
* [monitoring] Fix VerticalPodAutoscaler resource allocation for VMagent. (@klinch0 in https://github.com/cozystack/cozystack/pull/820)
* [postgres] Remove duplicated `template` entry from backup manifest. (@etoshutka in https://github.com/cozystack/cozystack/pull/872)
* [kube-ovn] Fix versions mapping in Makefile. (@kvaps in https://github.com/cozystack/cozystack/pull/883)
* [dx] Automatically detect version for migrations in the installer.sh. (@kvaps in https://github.com/cozystack/cozystack/pull/837)
* [dx] Remove version_map and building for library charts. (@kvaps in https://github.com/cozystack/cozystack/pull/998)
* [docs] Review the tenant Kubernetes cluster docs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/969)
* [docs] Explain that tenants cannot have dashes in their names. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/980)

## Dependencies

* MetalLB images are now built in-tree based on version 0.14.9 with additional critical patches. (@lllamnyp in https://github.com/cozystack/cozystack/pull/945)
* Update Kubernetes to v1.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/949)
* Update Talos Linux to v1.10.1. (@kvaps in https://github.com/cozystack/cozystack/pull/931)
* Update Cilium to v1.17.3. (@kvaps in https://github.com/cozystack/cozystack/pull/848)
* Update LINSTOR to v1.31.0. (@kvaps in https://github.com/cozystack/cozystack/pull/846)
* Update Kube-OVN to v1.13.11. (@kvaps in https://github.com/cozystack/cozystack/pull/847, @lllamnyp in https://github.com/cozystack/cozystack/pull/922)
* Update tenant Kubernetes to v1.32. (@kvaps in https://github.com/cozystack/cozystack/pull/871)
* Update flux-operator to 0.20.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/880 and https://github.com/cozystack/cozystack/pull/934)
* Update multiple Cluster API components. (@kvaps in https://github.com/cozystack/cozystack/pull/867 and https://github.com/cozystack/cozystack/pull/947)
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953, fixed by @nbykov0 in https://github.com/cozystack/cozystack/pull/983)
* Update cert-manager to v1.17.2. (@kvaps in https://github.com/cozystack/cozystack/pull/975)

## Documentation

* [Installing Talos in Air-Gapped Environment](https://cozystack.io/docs/operations/talos/configuration/air-gapped/):
new guide for configuring and bootstrapping Talos Linux clusters in air-gapped environments.
(@klinch0 in https://github.com/cozystack/website/pull/203)

* [Cozystack Bundles](https://cozystack.io/docs/guides/bundles/): new page in the learning section explaining how Cozystack bundles work and how to choose a bundle.
(@NickVolynkin in https://github.com/cozystack/website/pull/188, https://github.com/cozystack/website/pull/189, and others;
updated by @kvaps in https://github.com/cozystack/website/pull/192 and https://github.com/cozystack/website/pull/193)

* [Managed Application Reference](https://cozystack.io/docs/reference/applications/): A set of new pages in the docs, mirroring application docs from the Cozystack dashboard.
(@NickVolynkin in https://github.com/cozystack/website/pull/198, https://github.com/cozystack/website/pull/202, and https://github.com/cozystack/website/pull/204)

* **LINSTOR Networking**: Guides on [configuring dedicated network for LINSTOR](https://cozystack.io/docs/operations/storage/dedicated-network/)
and [configuring network for distributed storage in multi-datacenter setup](https://cozystack.io/docs/operations/stretched/linstor-dedicated-network/).
(@xy2, edited by @NickVolynkin in https://github.com/cozystack/website/pull/171, https://github.com/cozystack/website/pull/182, and https://github.com/cozystack/website/pull/184)

### Fixes

* Correct error in the doc for the command to edit the configmap. (@lb0o in https://github.com/cozystack/website/pull/207)
* Fix group name in OIDC docs. (@kingdonb in https://github.com/cozystack/website/pull/179)
* A bit more explanation of Docker buildx builders. (@nbykov0 in https://github.com/cozystack/website/pull/187)

## Development, Testing, and CI/CD

### Testing

Improvements:

* Introduce `cozytest` — a new [BATS-based](https://github.com/bats-core/bats-core) testing framework. (@kvaps in https://github.com/cozystack/cozystack/pull/982)

Fixes:

* Fix `device_ownership_from_security_context` CRI. (@dtrdnk in https://github.com/cozystack/cozystack/pull/896)
* Increase timeout durations for `capi` and `keycloak` to improve reliability during e2e-tests. (@kvaps in https://github.com/cozystack/cozystack/pull/858)
* Return `genisoimage` to the e2e-test Dockerfile. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/962)

### CI/CD Changes

Improvements:

* Use release branches `release-X.Y` for gathering and releasing fixes after the initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
* Automatically create release branches after the initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
* Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
* Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
* Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
* Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)

Fixes:

* Improve the check for `versions_map` running on pull requests. (@kvaps and @klinch0 in https://github.com/cozystack/cozystack/pull/836, https://github.com/cozystack/cozystack/pull/842, and https://github.com/cozystack/cozystack/pull/845)
* If the release step was skipped on a tag, skip tests as well. (@kvaps in https://github.com/cozystack/cozystack/pull/822)
* Allow CI to cancel the previous job if a new one is scheduled. (@kvaps in https://github.com/cozystack/cozystack/pull/873)
* Use the correct version name when uploading build assets to the release page. (@kvaps in https://github.com/cozystack/cozystack/pull/876)
* Stop using `ok-to-test` label to trigger CI in pull requests. (@kvaps in https://github.com/cozystack/cozystack/pull/875)
* Do not run tests in the release building pipeline. (@kvaps in https://github.com/cozystack/cozystack/pull/882)
* Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* Force-update release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* Show detailed errors in the `pull-request-release` workflow. (@lllamnyp in https://github.com/cozystack/cozystack/pull/992)

## Community and Maintenance

### Repository Maintenance

Added @klinch0 to CODEOWNERS. (@kvaps in https://github.com/cozystack/cozystack/pull/838)

### New Contributors

* @etoshutka made their first contribution in https://github.com/cozystack/cozystack/pull/872
* @dtrdnk made their first contribution in https://github.com/cozystack/cozystack/pull/896
* @zdenekjanda made their first contribution in https://github.com/cozystack/cozystack/pull/924
* @gwynbleidd2106 made their first contribution in https://github.com/cozystack/cozystack/pull/962

## Full Changelog

See https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0
@@ -1,10 +1,37 @@
# Release Workflow

This section explains how Cozystack builds and releases are made.
This document describes Cozystack’s release process.

## Introduction

Cozystack uses a staged release process to ensure stability and flexibility during development.

There are three types of releases:

- **Release Candidates (RC)** – Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
- **Regular Releases** – Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
- **Patch Releases** – Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.

Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.

## Release Candidates

Release candidates are Cozystack versions that introduce new features and are published before a stable release.
Their purpose is to help validate stability before finalizing a new feature release.
They allow for final rounds of testing and bug fixes without freezing development.

Release candidates are given numbers `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
They are created directly in the `main` branch.
An RC is typically tagged when all major features for the upcoming release have been merged into main and the release enters its testing phase.
However, new features and changes can still be added before the regular release `vX.Y.0`.

Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in main.
This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.

## Regular Releases

When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
go.mod: 13 lines changed
@@ -37,6 +37,7 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
@@ -91,14 +92,14 @@ require (
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
go.sum: 28 lines changed
@@ -26,8 +26,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -212,8 +212,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -222,26 +222,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
117
hack/cozytest.sh
Executable file
117
hack/cozytest.sh
Executable file
@@ -0,0 +1,117 @@
#!/bin/sh
###############################################################################
# cozytest.sh - Bats-compatible test runner with live trace and enhanced     #
#               output, written in pure shell                                #
###############################################################################
set -eu

TEST_FILE=${1:?Usage: ./cozytest.sh <file.bats> [pattern]}
PATTERN=${2:-*}
LINE='----------------------------------------------------------------'

cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
BEGIN=$(date +%s)
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }

###############################################################################
# run_one <fn> <title>                                                        #
###############################################################################
run_one() {
  fn=$1 title=$2
  tmp=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
  log="$tmp/log"

  echo "╭ » Run test: $title"
  START=$(date +%s)
  skip_next="+ $fn" # skip the first trace line, which is just the function name

  {
    (
      PS4='+ '   # prefix for set -x
      set -eu -x # strict + trace
      "$fn"
    )
    printf '__RC__%s\n' "$?" # sentinel carrying the test's exit code into the log
  } 2>&1 | tee "$log" | while IFS= read -r line; do
    case "$line" in
      '__RC__'*) : ;;
      '+ '*) cmd=${line#'+ '}
        [ "$cmd" = "${skip_next#+ }" ] && continue
        case "$cmd" in
          'set -e'|'set -x'|'set -u'|'return 0') continue ;;
        esac
        out=$cmd ;;
      *) out=$line ;;
    esac
    now=$(( $(date +%s) - START ))
    [ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
    printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
  done

  rc=$(awk '/^__RC__/ {print substr($0,7)}' "$log" | tail -n1)
  [ -z "$rc" ] && rc=1
  now=$(( $(date +%s) - START ))

  if [ "$rc" -eq 0 ]; then
    printf '╰[%02d:%02d] ✅ Test OK: %s\n' $((now/60)) $((now%60)) "$title"
  else
    printf '╰[%02d:%02d] ❌ Test failed: %s (exit %s)\n' \
      $((now/60)) $((now%60)) "$title" "$rc"
    echo "----- captured output -----------------------------------------"
    grep -v '^__RC__' "$log"
    echo "$LINE"
    exit "$rc"
  fi

  rm -rf "$tmp"
}

###############################################################################
# convert .bats -> shell functions                                            #
###############################################################################
TMP_SH=$(mktemp) || { echo "Failed to create temp file" >&2; exit 1; }
trap 'rm -f "$TMP_SH"' EXIT
awk '
/^@test[[:space:]]+"/ {
  line = substr($0, index($0, "\"") + 1)
  title = substr(line, 1, index(line, "\"") - 1)
  fname = "test_"
  for (i = 1; i <= length(title); i++) {
    c = substr(title, i, 1)
    fname = fname (c ~ /[A-Za-z0-9]/ ? c : "_")
  }
  printf("### %s\n", title)
  printf("%s() {\n", fname)
  print "  set -e" # any error inside the test body fails the test
  next
}
/^}$/ {
  print "  return 0" # unless the author exited non-zero, the test passes
  print "}"
  next
}
{ print }
' "$TEST_FILE" > "$TMP_SH"

[ -f "$TMP_SH" ] || { echo "Failed to generate test functions" >&2; exit 1; }
# shellcheck disable=SC1090
. "$TMP_SH"

###############################################################################
# run selected tests                                                          #
###############################################################################
awk -v pat="$PATTERN" '
/^### / {
  title = substr($0, 5)
  name = "test_"
  for (i = 1; i <= length(title); i++) {
    c = substr(title, i, 1)
    name = name (c ~ /[A-Za-z0-9]/ ? c : "_")
  }
  if (pat == "*" || index(title, pat) > 0)
    printf("%s %s\n", name, title)
}
' "$TMP_SH" | while IFS=' ' read -r fn title; do
  run_one "$fn" "$title"
done
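
# Example invocations (illustrative, not part of the script; the pattern is
# matched as a substring of the test title by the awk filter above):
#
#   ./hack/cozytest.sh hack/e2e-apps.bats                # run every test in the file
#   ./hack/cozytest.sh hack/e2e-apps.bats 'Create DB'    # run only tests whose title contains "Create DB"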
339  hack/e2e-apps.bats  Executable file
@@ -0,0 +1,339 @@
#!/usr/bin/env bats

# -----------------------------------------------------------------------------
# Cozystack end‑to‑end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Create tenant with isolated mode enabled" {
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}
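# Note: a Tenant named "test" created in tenant-root materializes as the
# HelmRelease "tenant-test" and the namespace "tenant-test", which is what the
# two waits above poll for.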

@test "Create a tenant Kubernetes control plane" {
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: false
      valuesOverride: {}
    cilium:
      valuesOverride: {}
    fluxcd:
      enabled: false
      valuesOverride: {}
    gatewayAPI:
      enabled: false
    gpuOperator:
      enabled: false
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts: []
      valuesOverride: {}
    monitoringAgents:
      enabled: false
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
  controlPlane:
    apiServer:
      resources: {}
      resourcesPreset: small
    controllerManager:
      resources: {}
      resourcesPreset: micro
    konnectivity:
      server:
        resources: {}
        resourcesPreset: micro
    replicas: 2
    scheduler:
      resources: {}
      resourcesPreset: micro
  host: ""
  nodeGroups:
    md0:
      ephemeralStorage: 20Gi
      gpus: []
      instanceType: u1.medium
      maxReplicas: 10
      minReplicas: 0
      resources:
        cpu: ""
        memory: ""
      roles:
        - ingress-nginx
  storageClass: replicated
EOF
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
}

@test "Create a VM Disk" {
  name='test'
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMDisk
metadata:
  name: $name
  namespace: tenant-test
spec:
  source:
    http:
      url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
  optical: false
  storage: 5Gi
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test annotate pvc vm-disk-test cdi.kubevirt.io/storage.bind.immediate.requested=true
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
  diskName='test'
  name='test'
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  running: true
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  disks:
    - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
}

@test "Create a Virtual Machine" {
  name='test'
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  systemDisk:
    image: ubuntu
    storage: 5Gi
    storageClass: replicated
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
}

@test "Create DB PostgreSQL" {
  name='test'
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  postgresql:
    parameters:
      max_connections: 100
  quorum:
    minSyncReplicas: 0
    maxSyncReplicas: 0
  users:
    testuser:
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr postgres-$name --timeout=50s --for=condition=ready
  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
}

@test "Create DB MySQL" {
  name='test'
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  users:
    testuser:
      maxUserConnections: 1000
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
}

@test "Create DB ClickHouse" {
  name='test'
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
  name: $name
  namespace: tenant-test
spec:
  size: 10Gi
  logStorageSize: 2Gi
  shards: 1
  replicas: 2
  storageClass: ""
  logTTL: 15
  users:
    testuser:
      password: xai7Wepo
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/clickhouse-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
  timeout 100 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=2
}
391  hack/e2e-cluster.bats  Executable file
@@ -0,0 +1,391 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end‑to‑end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Required installer assets exist" {
  if [ ! -f _out/assets/cozystack-installer.yaml ]; then
    echo "Missing: _out/assets/cozystack-installer.yaml" >&2
    exit 1
  fi

  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi
}

@test "IPv4 forwarding is enabled" {
  if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
    echo "IPv4 forwarding is disabled!" >&2
    echo >&2
    echo "Enable it with:" >&2
    echo "  echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
    exit 1
  fi
}

@test "Clean previous VMs" {
  kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
  rm -rf srv1 srv2 srv3
}

@test "Prepare networking and masquerading" {
  ip link del cozy-br0 2>/dev/null || true
  ip link add cozy-br0 type bridge
  ip link set cozy-br0 up
  ip address add 192.168.123.1/24 dev cozy-br0

  # Masquerading rule – idempotent (delete first, then add)
  iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
  iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}

@test "Prepare cloud‑init drive for VMs" {
  mkdir -p srv1 srv2 srv3

  # Generate cloud‑init ISOs
  for i in 1 2 3; do
    echo "hostname: srv${i}" > "srv${i}/meta-data"

    cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF

    cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1${i}/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOF

    ( cd "srv${i}" && genisoimage \
        -output seed.img \
        -volid cidata -rational-rock -joliet \
        user-data meta-data network-config )
  done
}

@test "Use Talos NoCloud image from assets" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi

  rm -f nocloud-amd64.raw
  cp _out/assets/nocloud-amd64.raw.xz .
  xz --decompress nocloud-amd64.raw.xz
}

@test "Prepare VM disks" {
  for i in 1 2 3; do
    cp nocloud-amd64.raw srv${i}/system.img
    qemu-img resize srv${i}/system.img 50G
    qemu-img create srv${i}/data.img 100G
  done
}

@test "Create tap devices" {
  for i in 1 2 3; do
    ip link del cozy-srv${i} 2>/dev/null || true
    ip tuntap add dev cozy-srv${i} mode tap
    ip link set cozy-srv${i} up
    ip link set cozy-srv${i} master cozy-br0
  done
}

@test "Boot QEMU VMs" {
  for i in 1 2 3; do
    qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
      -device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
      -netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
      -drive file=srv${i}/system.img,if=virtio,format=raw \
      -drive file=srv${i}/seed.img,if=virtio,format=raw \
      -drive file=srv${i}/data.img,if=virtio,format=raw \
      -display none -daemonize -pidfile srv${i}/qemu.pid
  done

  # Give qemu a few seconds to start up networking
  sleep 5
}

@test "Wait until Talos API port 50000 is reachable on all machines" {
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Generate Talos cluster configuration" {
  # Cluster‑wide patches
  cat > patch.yaml <<'EOF'
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOF

  # Control‑plane‑only patches
  cat > patch-controlplane.yaml <<'EOF'
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOF

  # Generate secrets once
  if [ ! -f secrets.yaml ]; then
    talosctl gen secrets
  fi

  rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
  talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
    --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}

@test "Apply Talos configuration to the node" {
  # Apply the configuration to all three nodes
  for node in 11 12 13; do
    talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
  done

  # Wait for Talos services to come up again
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Bootstrap Talos cluster" {
  # Bootstrap etcd on the first node
  timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

  # Wait until etcd is healthy
  timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
  timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'

  # Retrieve kubeconfig
  rm -f kubeconfig
  talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10

  # Wait until all three nodes register in Kubernetes
  timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}

@test "Install Cozystack" {
  # Create namespace & configmap required by installer
  kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
  kubectl create configmap cozystack -n cozy-system \
    --from-literal=bundle-name=paas-full \
    --from-literal=ipv4-pod-cidr=10.244.0.0/16 \
    --from-literal=ipv4-pod-gateway=10.244.0.1 \
    --from-literal=ipv4-svc-cidr=10.96.0.0/16 \
    --from-literal=ipv4-join-cidr=100.64.0.0/16 \
    --from-literal=root-host=example.org \
    --from-literal=api-server-endpoint=https://192.168.123.10:6443 \
    --dry-run=client -o yaml | kubectl apply -f -

  # Apply installer manifests from file
  kubectl apply -f _out/assets/cozystack-installer.yaml

  # Wait for the installer deployment to become available
  kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available

  # Wait until HelmReleases appear & reconcile them
  timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
  sleep 5
  kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex

  # Fail the test if any HelmRelease is not Ready
  if kubectl get hr -A | grep -v " True " | grep -v NAME; then
    kubectl get hr -A
    fail "Some HelmReleases failed to reconcile"
  fi
}

@test "Wait for Cluster‑API provider deployments" {
  timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
}

@test "Wait for LINSTOR and configure storage" {
  # Linstor controller and nodes
  kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
  timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'

  for node in srv1 srv2 srv3; do
    kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
  done

  # Storage classes
  kubectl apply -f - <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOF
}

@test "Wait for MetalLB and configure address pool" {
  # MetalLB address pool
  kubectl apply -f - <<'EOF'
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools: [cozystack]
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses: [192.168.123.200-192.168.123.250]
  autoAssign: true
  avoidBuggyIPs: false
EOF
}

@test "Check Cozystack API service" {
  kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}

@test "Configure Tenant and wait for applications" {
  # Patch root tenant and wait for its releases
  kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'

  timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready

  if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
    flux reconcile hr monitoring -n tenant-root --force
    kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
  fi

  # Expose Cozystack services through ingress
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'

  # NGINX ingress controller
  timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available

  # etcd statefulset
  kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m

  # VictoriaMetrics components
  kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m

  # Grafana
  kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
  kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m

  # Verify Grafana via ingress
  ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
    echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
    exit 1
  fi
}

@test "Keycloak OIDC stack is healthy" {
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'

  timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}
370  hack/e2e.sh
@@ -1,370 +0,0 @@
#!/bin/bash
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
  echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
  echo 'please set it with following command:' >&2
  echo >&2
  echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
  echo >&2
  exit 1
fi

if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
  echo "IPv4 forwarding is not enabled!" >&2
  echo 'please enable forwarding with the following command:' >&2
  echo >&2
  echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
  echo >&2
  exit 1
fi

set -x
set -e

kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true

ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0

# Enable masquerading
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE

rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3

# Prepare cloud-init
for i in 1 2 3; do
  echo "hostname: srv$i" > "srv$i/meta-data"
  echo '#cloud-config' > "srv$i/user-data"
  cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1$i/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOT

  ( cd srv$i && genisoimage \
      -output seed.img \
      -volid cidata -rational-rock -joliet \
      user-data meta-data network-config
  )
done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
    -O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi
for i in 1 2 3; do
  cp nocloud-amd64.raw srv$i/system.img
  qemu-img resize srv$i/system.img 20G
done

# Prepare data drives
for i in 1 2 3; do
  qemu-img create srv$i/data.img 100G
done

# Prepare networking
for i in 1 2 3; do
  ip link del cozy-srv$i || true
  ip tuntap add dev cozy-srv$i mode tap
  ip link set cozy-srv$i up
  ip link set cozy-srv$i master cozy-br0
done

# Start VMs
for i in 1 2 3; do
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
    -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
    -drive file=srv$i/data.img,if=virtio,format=raw \
    -display none -daemonize -pidfile srv$i/qemu.pid
done

sleep 5

# Wait for VM to start up
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

cat > patch.yaml <<\EOT
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOT

cat > patch-controlplane.yaml <<\EOT
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOT

# Gen configuration
if [ ! -f secrets.yaml ]; then
  talosctl gen secrets
fi

rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig

# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i

# Wait for VM to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

# Bootstrap
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

# Wait for etcd
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'

rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig

# Wait for kubernetes nodes appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
kubectl create ns cozy-system -o yaml | kubectl apply -f -
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  bundle-name: "paas-full"
  ipv4-pod-cidr: "10.244.0.0/16"
  ipv4-pod-gateway: "10.244.0.1"
  ipv4-svc-cidr: "10.96.0.0/16"
  ipv4-join-cidr: "100.64.0.0/16"
  root-host: example.org
  api-server-endpoint: https://192.168.123.10:6443
EOT

#
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -

# wait for cozystack pod to start
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack

# wait for helmreleases appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'

sleep 5

# Wait for all HelmReleases to be installed
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

failed_hrs=$(kubectl get hr -A | grep -v True)
if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
  printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
  exit 1
fi

# Wait for Cluster-API providers
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller

# Wait for all linstor nodes become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'

kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data

kubectl create -f- <<EOT
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
kubectl create -f- <<EOT
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools:
    - cozystack
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses:
    - 192.168.123.200-192.168.123.250
  autoAssign: true
  avoidBuggyIPs: false
EOT

# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m

kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
  "host": "example.org",
  "ingress": true,
  "monitoring": true,
  "etcd": true,
  "isolated": true
}}'

# Wait for HelmRelease be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

# Wait for HelmReleases be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root

if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
  flux reconcile hr monitoring -n tenant-root --force
  kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi

kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
  "dashboard": true
}}'

# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller

# Wait for etcd
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd

# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

# Wait for grafana
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment

# Get IP of nginx-ingress
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')

# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found

# Test OIDC
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
  "oidc-enabled": "true"
}}'

timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
@@ -16,13 +16,15 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
   exit 0
 fi
 
-miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
+miss_map=$(mktemp)
+trap 'rm -f "$miss_map"' EXIT
+echo -n "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file" > $miss_map
 
 # search across all tags sorted by version
 search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
 
 resolved_miss_map=$(
-  echo "$miss_map" | while read -r chart version commit; do
+  while read -r chart version commit; do
     # if version is found in HEAD, it's HEAD
     if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
       echo "$chart $version HEAD"
@@ -56,7 +58,7 @@ resolved_miss_map=$(
     fi
 
     echo "$chart $version $found_tag"
-  done
+  done < $miss_map
 )
 
 printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"
65  hack/package_chart.sh  Executable file
@@ -0,0 +1,65 @@
#!/bin/sh

set -e

usage() {
  printf "%s\n" "Usage:" >&2 ;
  printf -- "%s\n" '---' >&2 ;
  printf "%s %s\n" "$0" "INPUT_DIR OUTPUT_DIR TMP_DIR [DEPENDENCY_DIR]" >&2 ;
  printf -- "%s\n" '---' >&2 ;
  printf "%s\n" "Takes a helm repository from INPUT_DIR, with an optional library repository in" >&2 ;
  printf "%s\n" "DEPENDENCY_DIR, prepares a view of the git archive at select points in history" >&2 ;
  printf "%s\n" "in TMP_DIR and packages helm charts, outputting the tarballs to OUTPUT_DIR" >&2 ;
}

if [ "x$(basename $PWD)" != "xpackages" ]
then
  echo "Error: This script must run from the ./packages/ directory" >&2
  echo >&2
  usage
  exit 1
fi

if [ "x$#" != "x3" ] && [ "x$#" != "x4" ]
then
  echo "Error: This script takes 3 or 4 arguments" >&2
  echo "Got $# arguments:" "$@" >&2
  echo >&2
  usage
  exit 1
fi

input_dir=$1
output_dir=$2
tmp_dir=$3

if [ "x$#" = "x4" ]
then
  dependency_dir=$4
fi

rm -rf "${output_dir:?}"
mkdir -p "${output_dir}"
while read package _ commit
do
  # this lets devs build the packages from a dirty repo for quick local testing
  if [ "x$commit" = "xHEAD" ]
  then
    helm package "${input_dir}/${package}" -d "${output_dir}"
    continue
  fi
  git archive --format tar "${commit}" "${input_dir}/${package}" | tar -xf- -C "${tmp_dir}/"

  # the library chart is not present in older commits and git archive doesn't fail gracefully if the path is not found
  if [ "x${dependency_dir}" != "x" ] && git ls-tree --name-only "${commit}" "${dependency_dir}" | grep -qx "${dependency_dir}"
  then
    git archive --format tar "${commit}" "${dependency_dir}" | tar -xf- -C "${tmp_dir}/"
  fi
  helm package "${tmp_dir}/${input_dir}/${package}" -d "${output_dir}"
  rm -rf "${tmp_dir:?}/${input_dir:?}/${package:?}"
  if [ "x${dependency_dir}" != "x" ]
  then
    rm -rf "${tmp_dir:?}/${dependency_dir:?}"
  fi
done < "${input_dir}/versions_map"
helm repo index "${output_dir}"
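
# The versions_map file consumed by the loop above is expected to hold one chart
# per line in the form "<package> <version> <commit-or-HEAD>"; the second field
# is ignored here. Hypothetical example (names and hashes are illustrative only):
#
#   tenant 1.6.0 HEAD
#   kubernetes 0.15.0 0123abc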
139  internal/controller/system_helm_reconciler.go  Normal file
@@ -0,0 +1,139 @@
package controller

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"time"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

type CozystackConfigReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

var configMapNames = []string{"cozystack", "cozystack-branding", "cozystack-scheduling"}

const configMapNamespace = "cozy-system"
const digestAnnotation = "cozystack.io/cozy-config-digest"
const forceReconcileKey = "reconcile.fluxcd.io/forceAt"
const requestedAt = "reconcile.fluxcd.io/requestedAt"

func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
	log := log.FromContext(ctx)

	digest, err := r.computeDigest(ctx)
	if err != nil {
		log.Error(err, "failed to compute config digest")
		return ctrl.Result{}, nil
	}

	var helmList helmv2.HelmReleaseList
	if err := r.List(ctx, &helmList); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to list HelmReleases: %w", err)
	}

	now := time.Now().Format(time.RFC3339Nano)
	updated := 0

	for _, hr := range helmList.Items {
		isSystemApp := hr.Labels["cozystack.io/system-app"] == "true"
		isTenantRoot := hr.Namespace == "tenant-root" && hr.Name == "tenant-root"
		if !isSystemApp && !isTenantRoot {
			continue
		}

		if hr.Annotations == nil {
			hr.Annotations = map[string]string{}
		}

		if hr.Annotations[digestAnnotation] == digest {
			continue
		}

		patch := client.MergeFrom(hr.DeepCopy())
		hr.Annotations[digestAnnotation] = digest
		hr.Annotations[forceReconcileKey] = now
		hr.Annotations[requestedAt] = now

		if err := r.Patch(ctx, &hr, patch); err != nil {
			log.Error(err, "failed to patch HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
			continue
		}
		updated++
		log.Info("patched HelmRelease with new config digest", "name", hr.Name, "namespace", hr.Namespace)
	}

	log.Info("finished reconciliation", "updatedHelmReleases", updated)
	return ctrl.Result{}, nil
}

func (r *CozystackConfigReconciler) computeDigest(ctx context.Context) (string, error) {
	hash := sha256.New()

	for _, name := range configMapNames {
		var cm corev1.ConfigMap
		err := r.Get(ctx, client.ObjectKey{Namespace: configMapNamespace, Name: name}, &cm)
		if err != nil {
			if kerrors.IsNotFound(err) {
				continue // ignore missing
			}
			return "", err
		}

		// Sort keys for consistent hashing
		var keys []string
		for k := range cm.Data {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		for _, k := range keys {
			v := cm.Data[k]
			fmt.Fprintf(hash, "%s:%s=%s\n", name, k, v)
		}
	}

	return hex.EncodeToString(hash.Sum(nil)), nil
}
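
// Illustrative example of the stream hashed above, assuming only the
// "cozystack" ConfigMap exists with data {bundle-name: paas-full, root-host: example.org}
// (hypothetical contents, for illustration):
//
//	cozystack:bundle-name=paas-full
//	cozystack:root-host=example.org
//
// Sorting the keys keeps the digest stable across Go's randomized map iteration order.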

func (r *CozystackConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		WithEventFilter(predicate.Funcs{
			UpdateFunc: func(e event.UpdateEvent) bool {
				cm, ok := e.ObjectNew.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
			CreateFunc: func(e event.CreateEvent) bool {
				cm, ok := e.Object.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
			DeleteFunc: func(e event.DeleteEvent) bool {
				cm, ok := e.Object.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
		}).
		For(&corev1.ConfigMap{}).
		Complete(r)
}

func contains(slice []string, val string) bool {
	for _, s := range slice {
		if s == val {
			return true
		}
	}
	return false
}
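
// A minimal sketch of how this reconciler could be wired into a
// controller-runtime entrypoint; the layout here is an assumption for
// illustration, not the repository's actual main.go:
//
//	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
//	if err != nil {
//		panic(err)
//	}
//	if err := (&CozystackConfigReconciler{
//		Client: mgr.GetClient(),
//		Scheme: mgr.GetScheme(),
//	}).SetupWithManager(mgr); err != nil {
//		panic(err)
//	}
//	_ = mgr.Start(ctrl.SetupSignalHandler())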
158  internal/controller/tenant_helm_reconciler.go  Normal file
@@ -0,0 +1,158 @@
package controller

import (
	"context"
	"fmt"
	"strings"
	"time"

	e "errors"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	"gopkg.in/yaml.v2"
	corev1 "k8s.io/api/core/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

type TenantHelmReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	hr := &helmv2.HelmRelease{}
	if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
		if errors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "unable to fetch HelmRelease")
		return ctrl.Result{}, err
	}

	if !strings.HasPrefix(hr.Name, "tenant-") {
		return ctrl.Result{}, nil
	}

	if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
		return ctrl.Result{}, nil
	}

	if len(hr.Status.History) == 0 {
		logger.Info("no history in HelmRelease status", "name", hr.Name)
		return ctrl.Result{}, nil
	}

	if hr.Status.History[0].Status != "deployed" {
		return ctrl.Result{}, nil
	}

	newDigest := hr.Status.History[0].Digest
	var hrList helmv2.HelmReleaseList
	childNamespace := getChildNamespace(hr.Namespace, hr.Name)
	if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
		if hr.Spec.Values == nil {
			logger.Error(e.New("hr.Spec.Values is nil"), "cant annotate tenant-root ns")
			return ctrl.Result{}, nil
		}
		err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
		if err != nil {
			logger.Error(err, "cant annotate tenant-root ns")
			return ctrl.Result{}, nil
		}
		logger.Info("namespace 'tenant-root' annotated")
	}

	if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
		logger.Error(err, "unable to list HelmReleases in namespace", "namespace", hr.Name)
		return ctrl.Result{}, err
	}

	for _, item := range hrList.Items {
		if item.Name == hr.Name {
			continue
		}
		oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
		if oldDigest == newDigest {
			continue
		}
		patchTarget := item.DeepCopy()

		if patchTarget.Annotations == nil {
			patchTarget.Annotations = map[string]string{}
		}
		ts := time.Now().Format(time.RFC3339Nano)

		patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
		patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
		patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts

		patch := client.MergeFrom(item.DeepCopy())
		if err := r.Patch(ctx, patchTarget, patch); err != nil {
			logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
			continue
		}

		logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
	}

	return ctrl.Result{}, nil
}

func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&helmv2.HelmRelease{}).
		Complete(r)
}

func getChildNamespace(currentNamespace, hrName string) string {
	tenantName := strings.TrimPrefix(hrName, "tenant-")

	switch {
	case currentNamespace == "tenant-root" && hrName == "tenant-root":
		// 1) root tenant inside root namespace
		return "tenant-root"

	case currentNamespace == "tenant-root":
		// 2) any other tenant in root namespace
		return fmt.Sprintf("tenant-%s", tenantName)

	default:
		// 3) tenant in a dedicated namespace
		return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
	}
}
|
||||
|
||||
func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
|
||||
var data map[string]interface{}
|
||||
if err := yaml.Unmarshal(values.Raw, &data); err != nil {
|
||||
return fmt.Errorf("failed to parse HelmRelease values: %w", err)
|
||||
}
|
||||
|
||||
host, ok := data["host"].(string)
|
||||
if !ok || host == "" {
|
||||
return fmt.Errorf("host field not found or not a string")
|
||||
}
|
||||
|
||||
var ns corev1.Namespace
|
||||
if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
|
||||
return fmt.Errorf("failed to get namespace tenant-root: %w", err)
|
||||
}
|
||||
|
||||
if ns.Annotations == nil {
|
||||
ns.Annotations = map[string]string{}
|
||||
}
|
||||
ns.Annotations["namespace.cozystack.io/host"] = host
|
||||
|
||||
if err := c.Update(context.TODO(), &ns); err != nil {
|
||||
return fmt.Errorf("failed to update namespace: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
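For context, the digest propagation above leans on Flux's standard force-reconcile annotations (`reconcile.fluxcd.io/requestedAt` and `reconcile.fluxcd.io/forceAt`). A minimal sketch of triggering the same effect by hand — the namespace and release name are placeholders, not values from this diff:

```bash
# Manually nudge a HelmRelease the same way the controller does.
ts="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
kubectl -n tenant-example annotate helmrelease my-app \
  "reconcile.fluxcd.io/requestedAt=${ts}" \
  "reconcile.fluxcd.io/forceAt=${ts}" --overwrite
```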
@@ -39,6 +39,15 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
    }

    t := getMonitoredObject(w)

    if t == nil {
        err = r.Delete(ctx, w)
        if err != nil {
            logger.Error(err, "failed to delete workload")
        }
        return ctrl.Result{}, err
    }

    err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

    // found object, nothing to do

@@ -68,20 +77,23 @@ func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
}

func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
    if strings.HasPrefix(w.Name, "pvc-") {
    switch {
    case strings.HasPrefix(w.Name, "pvc-"):
        obj := &corev1.PersistentVolumeClaim{}
        obj.Name = strings.TrimPrefix(w.Name, "pvc-")
        obj.Namespace = w.Namespace
        return obj
    }
    if strings.HasPrefix(w.Name, "svc-") {
    case strings.HasPrefix(w.Name, "svc-"):
        obj := &corev1.Service{}
        obj.Name = strings.TrimPrefix(w.Name, "svc-")
        obj.Namespace = w.Namespace
        return obj
    case strings.HasPrefix(w.Name, "pod-"):
        obj := &corev1.Pod{}
        obj.Name = strings.TrimPrefix(w.Name, "pod-")
        obj.Namespace = w.Namespace
        return obj
    }
    obj := &corev1.Pod{}
    obj.Name = w.Name
    obj.Namespace = w.Namespace
    var obj client.Object
    return obj
}

internal/controller/workload_controller_test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package controller

import (
    "testing"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
    corev1 "k8s.io/api/core/v1"
)

func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "unprefixed-name"
    obj := getMonitoredObject(w)
    if obj != nil {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
    }
}

func TestPodMonitoredObject(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "pod-mypod"
    obj := getMonitoredObject(w)
    if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
    }
}
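A minimal way to exercise the new tests locally, assuming standard Go tooling and the repository layout shown in the file header above:

```bash
# Run only the getMonitoredObject tests added in this file.
go test ./internal/controller/ -run 'MonitoredObject' -v
```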
@@ -212,15 +212,12 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
) error {
    logger := log.FromContext(ctx)

    // Combine both init containers and normal containers to sum resources properly
    combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)

    // totalResources will store the sum of all container resource limits
    // totalResources will store the sum of all container resource requests
    totalResources := make(map[string]resource.Quantity)

    // Iterate over all containers to aggregate their Limits
    for _, container := range combinedContainers {
        for name, qty := range container.Resources.Limits {
    // Iterate over all containers to aggregate their requests
    for _, container := range pod.Spec.Containers {
        for name, qty := range container.Resources.Requests {
            if existing, exists := totalResources[name.String()]; exists {
                existing.Add(qty)
                totalResources[name.String()] = existing

@@ -249,17 +246,26 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(

    workload := &cozyv1alpha1.Workload{
        ObjectMeta: metav1.ObjectMeta{
            Name:      pod.Name,
            Name:      fmt.Sprintf("pod-%s", pod.Name),
            Namespace: pod.Namespace,
            Labels:    map[string]string{},
        },
    }

    metaLabels := r.getWorkloadMetadata(&pod)
    _, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
        // Update owner references with the new monitor
        updateOwnerReferences(workload.GetObjectMeta(), monitor)

        // Copy labels from the Pod if needed
        workload.Labels = pod.Labels
        for k, v := range pod.Labels {
            workload.Labels[k] = v
        }

        // Add workload meta to labels
        for k, v := range metaLabels {
            workload.Labels[k] = v
        }

        // Fill Workload status fields:
        workload.Status.Kind = monitor.Spec.Kind

@@ -436,3 +442,12 @@ func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.
        return requests
    }
}

func (r *WorkloadMonitorReconciler) getWorkloadMetadata(obj client.Object) map[string]string {
    labels := make(map[string]string)
    annotations := obj.GetAnnotations()
    if instanceType, ok := annotations["kubevirt.io/cluster-instancetype-name"]; ok {
        labels["workloads.cozystack.io/kubevirt-vmi-instance-type"] = instanceType
    }
    return labels
}
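With the `pod-%s` naming introduced above, each monitored Pod should surface as a namespaced Workload object. A hedged inspection example — the CRD's plural name, namespace, and pod name here are assumptions for illustration, not confirmed by this diff:

```bash
# Expect a Workload named pod-<podName> next to each monitored pod.
kubectl -n tenant-example get workloads.cozystack.io pod-mypod -o yaml
```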
@@ -1,14 +1,8 @@
OUT=../../_out/repos/apps
TMP=../../_out/repos/apps/historical
OUT=../_out/repos/apps
TMP := $(shell mktemp -d)

repo:
    rm -rf "$(OUT)"
    mkdir -p "$(OUT)"
    awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
    awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
    helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
    cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/apps
    rm -rf "$(TMP)"
    cd .. && ../hack/package_chart.sh apps $(OUT) $(TMP) library

fix-chartnames:
    find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
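The `repo` target above re-packages historical chart versions pinned in `versions_map`; the awk calls imply a three-column format of chart directory, version, and git ref. A hypothetical entry and the command it expands to, purely for illustration:

```bash
# versions_map line (hypothetical): clickhouse 0.9.0 1a2b3c4
# expands into roughly:
mkdir -p "$TMP/clickhouse-0.9.0"
git archive 1a2b3c4 clickhouse | tar -xf- --strip-components=1 -C "$TMP/clickhouse-0.9.0"
```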
@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
version: 0.2.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.1.0"
appVersion: "0.2.0"
packages/apps/bucket/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# S3 bucket

## Parameters

packages/apps/bucket/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib

@@ -18,3 +18,14 @@ rules:
    resourceNames:
      - {{ .Release.Name }}-ui
    verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
  kind: Role
  name: {{ .Release.Name }}-dashboard-resources
  apiGroup: rbac.authorization.k8s.io

packages/apps/bucket/values.schema.json (new file, 5 lines)
@@ -0,0 +1,5 @@
{
  "title": "Chart Values",
  "type": "object",
  "properties": {}
}

packages/apps/bucket/values.yaml (new file, 1 line)
@@ -0,0 +1 @@
{}
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.10.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -1,4 +1,4 @@
CLICKHOUSE_BACKUP_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
CLICKHOUSE_BACKUP_TAG = $(shell awk '$$0 ~ /^version:/ {print $$2}' Chart.yaml)

include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk

@@ -7,8 +7,10 @@ generate:
    readme-generator -v values.yaml -s values.schema.json -r README.md

image:
    docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/clickhouse-backup \
    docker buildx build images/clickhouse-backup \
        --provenance false \
        --builder=$(BUILDER) \
        --platform=$(PLATFORM) \
        --tag $(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG)) \
        --cache-from type=registry,ref=$(REGISTRY)/clickhouse-backup:latest \
        --cache-to type=inline \
@@ -1,32 +1,35 @@
# Managed Clickhouse Service

ClickHouse is an open source high-performance and column-oriented SQL database management system (DBMS).
It is used for online analytical processing (OLAP).
Cozystack platform uses Altinity operator to provide ClickHouse.

### How to restore backup:

find snapshot:
```
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
```
1. Find a snapshot:
   ```
   restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
   ```

restore:
```
restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
```
2. Restore it:
   ```
   restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
   ```

more details:
- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1).
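The restic commands above assume the repository password and S3 credentials are already exported. A minimal sketch using the `backup.*` values from this chart — the angle-bracket values are placeholders, not real secrets:

```bash
export AWS_ACCESS_KEY_ID='<backup.s3AccessKey>'
export AWS_SECRET_ACCESS_KEY='<backup.s3SecretKey>'
export RESTIC_PASSWORD='<backup.resticPassword>'
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
```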
## Parameters

### Common parameters

| Name             | Description                         | Value  |
| ---------------- | ----------------------------------- | ------ |
| `size`           | Persistent Volume size              | `10Gi` |
| `logStorageSize` | Persistent Volume for logs size     | `2Gi`  |
| `shards`         | Number of Clickhouse replicas       | `1`    |
| `replicas`       | Number of Clickhouse shards         | `2`    |
| `storageClass`   | StorageClass used to store the data | `""`   |
| `logTTL`         | for query_log and query_thread_log  | `15`   |

| Name             | Description                                               | Value  |
| ---------------- | --------------------------------------------------------- | ------ |
| `size`           | Size of Persistent Volume for data                        | `10Gi` |
| `logStorageSize` | Size of Persistent Volume for logs                        | `2Gi`  |
| `shards`         | Number of Clickhouse shards                               | `1`    |
| `replicas`       | Number of Clickhouse replicas                             | `2`    |
| `storageClass`   | StorageClass used to store the data                       | `""`   |
| `logTTL`         | TTL (expiration time) for query_log and query_thread_log  | `15`   |

### Configuration parameters

@@ -36,15 +39,32 @@ more details:

### Backup parameters

| Name                     | Description                                                                                                                                                                                                         | Value                                                  |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------ |
| `backup.enabled`         | Enable pereiodic backups                                                                                                                                                                                            | `false`                                                |
| `backup.s3Region`        | The AWS S3 region where backups are stored                                                                                                                                                                          | `us-east-1`                                            |
| `backup.s3Bucket`        | The S3 bucket used for storing backups                                                                                                                                                                              | `s3.example.org/clickhouse-backups`                    |
| `backup.schedule`        | Cron schedule for automated backups                                                                                                                                                                                 | `0 2 * * *`                                            |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups                                                                                                                                                                            | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey`     | The access key for S3, used for authentication                                                                                                                                                                      | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
| `backup.s3SecretKey`     | The secret key for S3, used for authentication                                                                                                                                                                      | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
| `backup.resticPassword`  | The password for Restic backup encryption                                                                                                                                                                           | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
| `resources`              | Resources                                                                                                                                                                                                           | `{}`                                                   |
| `resourcesPreset`        | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).   | `nano`                                                 |

| Name                     | Description                                                                  | Value                                                  |
| ------------------------ | ---------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled`         | Enable periodic backups                                                      | `false`                                                |
| `backup.s3Region`        | AWS S3 region where backups are stored                                       | `us-east-1`                                            |
| `backup.s3Bucket`        | S3 bucket used for storing backups                                           | `s3.example.org/clickhouse-backups`                    |
| `backup.schedule`        | Cron schedule for automated backups                                          | `0 2 * * *`                                            |
| `backup.cleanupStrategy` | Retention strategy for cleaning up old backups                               | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey`     | Access key for S3, used for authentication                                   | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
| `backup.s3SecretKey`     | Secret key for S3, used for authentication                                   | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
| `backup.resticPassword`  | Password for Restic backup encryption                                        | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
| `resources`              | Explicit CPU/memory resource requests and limits for the Clickhouse service  | `{}`                                                   |
| `resourcesPreset`        | Use a common resources preset when `resources` is not set explicitly.        | `nano`                                                 |

In production environments, it's recommended to set `resources` explicitly.
Example of `resources`:

```yaml
resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi
```

Allowed values for `resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
This value is ignored if `resources` value is set.
packages/apps/clickhouse/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/cozystack/cozystack/clickhouse-backup:0.10.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
@@ -1,3 +1,5 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
{{- $passwords := dict }}
{{- $users := .Values.users }}

@@ -32,7 +34,7 @@ kind: "ClickHouseInstallation"
metadata:
  name: "{{ .Release.Name }}"
spec:
  namespaceDomainPattern: "%s.svc.cozy.local"
  namespaceDomainPattern: "%s.svc.{{ $clusterDomain }}"
  defaults:
    templates:
      dataVolumeClaimTemplate: data-volume-template
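The new `namespaceDomainPattern` derives the cluster domain from the platform ConfigMap named in the `lookup` call, falling back to `cozy.local` when the key is unset. You can check what will be substituted on a live cluster:

```bash
kubectl -n cozy-system get configmap cozystack \
  -o jsonpath='{.data.cluster-domain}'
```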
@@ -92,6 +94,9 @@ spec:
  templates:
    volumeClaimTemplates:
      - name: data-volume-template
        metadata:
          labels:
            app.kubernetes.io/instance: {{ .Release.Name }}
        spec:
          accessModes:
            - ReadWriteOnce

@@ -99,6 +104,9 @@ spec:
            requests:
              storage: {{ .Values.size }}
      - name: log-volume-template
        metadata:
          labels:
            app.kubernetes.io/instance: {{ .Release.Name }}
        spec:
          accessModes:
            - ReadWriteOnce

@@ -107,6 +115,9 @@ spec:
              storage: {{ .Values.logStorageSize }}
    podTemplates:
      - name: clickhouse-per-host
        metadata:
          labels:
            app.kubernetes.io/instance: {{ .Release.Name }}
        spec:
          affinity:
            podAntiAffinity:

@@ -122,9 +133,9 @@ spec:
            - name: clickhouse
              image: clickhouse/clickhouse-server:24.9.2.42
              {{- if .Values.resources }}
              resources: {{- toYaml .Values.resources | nindent 16 }}
              resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 16 }}
              {{- else if ne .Values.resourcesPreset "none" }}
              resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
              resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 16 }}
              {{- end }}
              volumeMounts:
                - name: data-volume-template

@@ -133,6 +144,9 @@ spec:
                  mountPath: /var/log/clickhouse-server
    serviceTemplates:
      - name: svc-template
        metadata:
          labels:
            app.kubernetes.io/instance: {{ .Release.Name }}
          generateName: chendpoint-{chi}
        spec:
          ports:
@@ -24,3 +24,14 @@ rules:
    resourceNames:
      - {{ .Release.Name }}
    verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
  kind: Role
  name: {{ .Release.Name }}-dashboard-resources
  apiGroup: rbac.authorization.k8s.io

@@ -9,5 +9,5 @@ spec:
  kind: clickhouse
  type: clickhouse
  selector:
    clickhouse.altinity.com/chi: {{ $.Release.Name }}
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
@@ -4,22 +4,22 @@
  "properties": {
    "size": {
      "type": "string",
      "description": "Persistent Volume size",
      "description": "Size of Persistent Volume for data",
      "default": "10Gi"
    },
    "logStorageSize": {
      "type": "string",
      "description": "Persistent Volume for logs size",
      "description": "Size of Persistent Volume for logs",
      "default": "2Gi"
    },
    "shards": {
      "type": "number",
      "description": "Number of Clickhouse replicas",
      "description": "Number of Clickhouse shards",
      "default": 1
    },
    "replicas": {
      "type": "number",
      "description": "Number of Clickhouse shards",
      "description": "Number of Clickhouse replicas",
      "default": 2
    },
    "storageClass": {

@@ -29,7 +29,7 @@
    },
    "logTTL": {
      "type": "number",
      "description": "for query_log and query_thread_log",
      "description": "TTL (expiration time) for query_log and query_thread_log",
      "default": 15
    },
    "backup": {

@@ -37,17 +37,17 @@
      "properties": {
        "enabled": {
          "type": "boolean",
          "description": "Enable pereiodic backups",
          "description": "Enable periodic backups",
          "default": false
        },
        "s3Region": {
          "type": "string",
          "description": "The AWS S3 region where backups are stored",
          "description": "AWS S3 region where backups are stored",
          "default": "us-east-1"
        },
        "s3Bucket": {
          "type": "string",
          "description": "The S3 bucket used for storing backups",
          "description": "S3 bucket used for storing backups",
          "default": "s3.example.org/clickhouse-backups"
        },
        "schedule": {

@@ -57,34 +57,34 @@
        },
        "cleanupStrategy": {
          "type": "string",
          "description": "The strategy for cleaning up old backups",
          "description": "Retention strategy for cleaning up old backups",
          "default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
        },
        "s3AccessKey": {
          "type": "string",
          "description": "The access key for S3, used for authentication",
          "description": "Access key for S3, used for authentication",
          "default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
        },
        "s3SecretKey": {
          "type": "string",
          "description": "The secret key for S3, used for authentication",
          "description": "Secret key for S3, used for authentication",
          "default": "ju3eum4dekeich9ahM1te8waeGai0oog"
        },
        "resticPassword": {
          "type": "string",
          "description": "The password for Restic backup encryption",
          "description": "Password for Restic backup encryption",
          "default": "ChaXoveekoh6eigh4siesheeda2quai0"
        }
      }
    },
    "resources": {
      "type": "object",
      "description": "Resources",
      "description": "Explicit CPU/memory resource requests and limits for the Clickhouse service",
      "default": {}
    },
    "resourcesPreset": {
      "type": "string",
      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
      "description": "Use a common resources preset when `resources` is not set explicitly.",
      "default": "nano"
    }
  }
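Since the chart ships a `values.schema.json`, Helm validates user-supplied values against it at install and lint time. A quick local check, assuming the chart path used in this repository:

```bash
helm lint packages/apps/clickhouse --set backup.enabled=true
```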
@@ -1,11 +1,11 @@
## @section Common parameters

## @param size Persistent Volume size
## @param logStorageSize Persistent Volume for logs size
## @param shards Number of Clickhouse replicas
## @param replicas Number of Clickhouse shards
## @param size Size of Persistent Volume for data
## @param logStorageSize Size of Persistent Volume for logs
## @param shards Number of Clickhouse shards
## @param replicas Number of Clickhouse replicas
## @param storageClass StorageClass used to store the data
## @param logTTL for query_log and query_thread_log
## @param logTTL TTL (expiration time) for query_log and query_thread_log
##
size: 10Gi
logStorageSize: 2Gi

@@ -29,14 +29,14 @@ users: {}

## @section Backup parameters

## @param backup.enabled Enable pereiodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.enabled Enable periodic backups
## @param backup.s3Region AWS S3 region where backups are stored
## @param backup.s3Bucket S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.cleanupStrategy The strategy for cleaning up old backups
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
## @param backup.resticPassword The password for Restic backup encryption
## @param backup.cleanupStrategy Retention strategy for cleaning up old backups
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
## @param backup.resticPassword Password for Restic backup encryption
backup:
  enabled: false
  s3Region: us-east-1

@@ -47,7 +47,7 @@ backup:
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

## @param resources Resources
## @param resources Explicit CPU/memory resource requests and limits for the Clickhouse service
resources: {}
# resources:
#   limits:

@@ -56,6 +56,6 @@ resources: {}
#   requests:
#     cpu: 100m
#     memory: 512Mi

## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).

## @param resourcesPreset Use a common resources preset when `resources` is not set explicitly.
resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.7.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

packages/apps/ferretdb/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.14.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
@@ -24,3 +24,14 @@ rules:
    resourceNames:
      - {{ .Release.Name }}
    verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
  kind: Role
  name: {{ .Release.Name }}-dashboard-resources
  apiGroup: rbac.authorization.k8s.io
@@ -2,6 +2,8 @@ apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}
  labels:
    app.kubernetes.io/instance: {{ .Release.Name }}
spec:
  type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
  {{- if .Values.external }}

@@ -12,6 +12,7 @@ spec:
    metadata:
      labels:
        app: {{ .Release.Name }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      containers:
        - name: ferretdb
@@ -11,14 +11,17 @@ spec:
  {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
  {{- if $rawConstraints }}
  {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
      labelSelector:
        matchLabels:
          cnpg.io/cluster: {{ .Release.Name }}-postgres
  {{- end }}
  {{- end }}
  minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
  maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
  {{- if .Values.resources }}
  resources: {{- toYaml .Values.resources | nindent 4 }}
  resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
  {{- else if ne .Values.resourcesPreset "none" }}
  resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
  resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
  {{- end }}
  monitoring:
    enablePodMonitor: true

@@ -32,6 +35,7 @@ spec:
  inheritedMetadata:
    labels:
      policy.cozystack.io/allow-to-apiserver: "true"
      app.kubernetes.io/instance: {{ .Release.Name }}

{{- if .Values.users }}
  managed:

@@ -9,5 +9,5 @@ spec:
  kind: ferretdb
  type: ferretdb
  selector:
    app: {{ $.Release.Name }}
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.5.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -6,8 +6,10 @@ include ../../../scripts/package.mk
image: image-nginx

image-nginx:
    docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/nginx-cache \
    docker buildx build images/nginx-cache \
        --provenance false \
        --builder=$(BUILDER) \
        --platform=$(PLATFORM) \
        --tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)) \
        --cache-from type=registry,ref=$(REGISTRY)/nginx-cache:latest \
        --cache-to type=inline \

packages/apps/http-cache/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:4e1f5153d2673a399b315252238f4dc3eb5d6c59295aef594691710cc5b72eb4
ghcr.io/cozystack/cozystack/nginx-cache:0.5.1@sha256:50ac1581e3100bd6c477a71161cb455a341ffaf9e5e2f6086802e4e25271e8af
@@ -1,4 +1,4 @@
FROM ubuntu:22.04 as stage
FROM ubuntu:22.04 AS stage

ARG NGINX_VERSION=1.25.3
ARG IP2LOCATION_C_VERSION=8.6.1

@@ -9,11 +9,15 @@ ARG FIFTYONEDEGREES_NGINX_VERSION=3.2.21.1
ARG NGINX_CACHE_PURGE_VERSION=2.5.3
ARG NGINX_VTS_VERSION=0.2.2

ARG TARGETOS
ARG TARGETARCH

# Install required packages for development
RUN apt-get update -q \
    && apt-get install -yq \
RUN apt update -q \
    && apt install -yq --no-install-recommends \
    ca-certificates \
    unzip \
    autoconf \
    automake \
    build-essential \
    libtool \
    libpcre3 \

@@ -68,7 +72,7 @@ RUN checkinstall \
    --default \
    --pkgname=ip2location-c \
    --pkgversion=${IP2LOCATION_C_VERSION} \
    --pkgarch=amd64 \
    --pkgarch=${TARGETARCH} \
    --pkggroup=lib \
    --pkgsource="https://github.com/chrislim2888/IP2Location-C-Library" \
    --maintainer="Eduard Generalov <eduard@generalov.net>" \

@@ -97,7 +101,7 @@ RUN checkinstall \
    --default \
    --pkgname=ip2proxy-c \
    --pkgversion=${IP2PROXY_C_VERSION} \
    --pkgarch=amd64 \
    --pkgarch=${TARGETARCH} \
    --pkggroup=lib \
    --pkgsource="https://github.com/ip2location/ip2proxy-c" \
    --maintainer="Eduard Generalov <eduard@generalov.net>" \

@@ -144,7 +148,7 @@ RUN checkinstall \
    --default \
    --pkgname=nginx \
    --pkgversion=$VERS \
    --pkgarch=amd64 \
    --pkgarch=${TARGETARCH} \
    --pkggroup=web \
    --provides=nginx \
    --requires=ip2location-c,ip2proxy-c,libssl3,libc-bin,libc6,libzstd1,libpcre++0v5,libpcre16-3,libpcre2-8-0,libpcre3,libpcre32-3,libpcrecpp0v5,libmaxminddb0 \

@@ -165,10 +169,9 @@ COPY nginx-reloader.sh /usr/bin/nginx-reloader.sh
RUN set -x \
    && groupadd --system --gid 101 nginx \
    && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
    && apt update \
    && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates inotify-tools \
    && apt -y install /packages/*.deb \
    && apt-get clean \
    && apt update -q \
    && apt install -yq --no-install-recommends --no-install-suggests gnupg1 ca-certificates inotify-tools \
    && apt install -y /packages/*.deb \
    && rm -rf /var/lib/apt/lists/* \
    && mkdir -p /var/lib/nginx /var/log/nginx \
    && ln -sf /dev/stdout /var/log/nginx/access.log \
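Replacing the hard-coded `--platform linux/amd64` with BuildKit's `TARGETOS`/`TARGETARCH` build args lets the same Dockerfile build for several platforms in one invocation; BuildKit populates `TARGETARCH` automatically per platform. A sketch — the tag is a placeholder:

```bash
docker buildx build --platform linux/amd64,linux/arm64 \
  --tag example.org/nginx-cache:dev images/nginx-cache
```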
@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
@@ -34,9 +34,9 @@ spec:
      - image: haproxy:latest
        name: haproxy
        {{- if .Values.haproxy.resources }}
        resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
        resources: {{- include "cozy-lib.resources.sanitize" (list .Values.haproxy.resources $) | nindent 10 }}
        {{- else if ne .Values.haproxy.resourcesPreset "none" }}
        resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
        resources: {{- include "cozy-lib.resources.preset" (list .Values.haproxy.resourcesPreset $) | nindent 10 }}
        {{- end }}
        ports:
          - containerPort: 8080

@@ -53,9 +53,9 @@ spec:
      containers:
        - name: nginx
          {{- if $.Values.nginx.resources }}
          resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
          resources: {{- include "cozy-lib.resources.sanitize" (list $.Values.nginx.resources $) | nindent 10 }}
          {{- else if ne $.Values.nginx.resourcesPreset "none" }}
          resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
          resources: {{- include "cozy-lib.resources.preset" (list $.Values.nginx.resourcesPreset $) | nindent 10 }}
          {{- end }}
          image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
          readinessProbe:
packages/apps/http-cache/templates/workloadmonitor.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-haproxy
spec:
  replicas: {{ .Values.haproxy.replicas }}
  minReplicas: 1
  kind: http-cache
  type: http-cache
  selector:
    app: {{ $.Release.Name }}-haproxy
  version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-nginx
spec:
  replicas: {{ .Values.nginx.replicas }}
  minReplicas: 1
  kind: http-cache
  type: http-cache
  selector:
    app: {{ $.Release.Name }}-nginx-cache
  version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: http-cache
  type: http-cache
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
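After a release is deployed, the three monitors defined above should be listable per namespace. A hedged check — the CRD's plural name and the namespace are assumptions for illustration:

```bash
kubectl -n tenant-example get workloadmonitors.cozystack.io
```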
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.7.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -14,9 +14,9 @@
| `zookeeper.replicas`        | Number of ZooKeeper replicas                   | `3`  |
| `zookeeper.storageClass`    | StorageClass used to store the ZooKeeper data  | `""` |
| `kafka.resources`           | Resources                                      | `{}` |
| `kafka.resourcesPreset`     | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`  |
| `kafka.resourcesPreset`     | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `zookeeper.resources`       | Resources                                      | `{}` |
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`  |
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |

### Configuration parameters
packages/apps/kafka/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
@@ -25,3 +25,14 @@ rules:
      - {{ .Release.Name }}
      - {{ $.Release.Name }}-zookeeper
    verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
  kind: Role
  name: {{ .Release.Name }}-dashboard-resources
  apiGroup: rbac.authorization.k8s.io
@@ -9,9 +9,9 @@ spec:
  kafka:
    replicas: {{ .Values.kafka.replicas }}
    {{- if .Values.kafka.resources }}
    resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
    resources: {{- include "cozy-lib.resources.sanitize" (list .Values.kafka.resources $) | nindent 6 }}
    {{- else if ne .Values.kafka.resourcesPreset "none" }}
    resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
    resources: {{- include "cozy-lib.resources.preset" (list .Values.kafka.resourcesPreset $) | nindent 6 }}
    {{- end }}
    listeners:
      - name: plain

@@ -71,9 +71,9 @@ spec:
  zookeeper:
    replicas: {{ .Values.zookeeper.replicas }}
    {{- if .Values.zookeeper.resources }}
    resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
    resources: {{- include "cozy-lib.resources.sanitize" (list .Values.zookeeper.resources $) | nindent 6 }}
    {{- else if ne .Values.zookeeper.resourcesPreset "none" }}
    resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
    resources: {{- include "cozy-lib.resources.preset" (list .Values.zookeeper.resourcesPreset $) | nindent 6 }}
    {{- end }}
    storage:
      type: persistent-claim
@@ -33,7 +33,7 @@
    "resourcesPreset": {
      "type": "string",
      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
      "default": "nano"
      "default": "small"
    }
  }
},

@@ -63,7 +63,7 @@
    "resourcesPreset": {
      "type": "string",
      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
      "default": "nano"
      "default": "micro"
    }
  }
},
@@ -25,7 +25,7 @@ kafka:
  #     memory: 512Mi

  ## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"
  resourcesPreset: "small"

zookeeper:
  size: 5Gi

@@ -42,7 +42,7 @@ zookeeper:
  #     memory: 512Mi

  ## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"
  resourcesPreset: "micro"

## @section Configuration parameters
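The changed defaults only apply to fresh values; any release can still pick a preset explicitly with standard Helm flags. A sketch — the release name, namespace, and chart path are placeholders:

```bash
helm upgrade --install kafka ./packages/apps/kafka -n tenant-example \
  --set kafka.resourcesPreset=medium \
  --set zookeeper.resourcesPreset=small
```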
@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.19.0
version: 0.24.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.30.1"
appVersion: 1.32.4
@@ -14,8 +14,10 @@ generate:
image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler

image-ubuntu-container-disk:
    docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
    docker buildx build images/ubuntu-container-disk \
        --provenance false \
        --builder=$(BUILDER) \
        --platform=$(PLATFORM) \
        --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
        --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
        --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \

@@ -30,8 +32,10 @@ image-ubuntu-container-disk:
    rm -f images/ubuntu-container-disk.json

image-kubevirt-cloud-provider:
    docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/kubevirt-cloud-provider \
    docker buildx build images/kubevirt-cloud-provider \
        --provenance false \
        --builder=$(BUILDER) \
        --platform=$(PLATFORM) \
        --tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)) \
        --tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
        --cache-from type=registry,ref=$(REGISTRY)/kubevirt-cloud-provider:latest \

@@ -45,8 +49,10 @@ image-kubevirt-cloud-provider:
    rm -f images/kubevirt-cloud-provider.json

image-kubevirt-csi-driver:
    docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/kubevirt-csi-driver \
    docker buildx build images/kubevirt-csi-driver \
        --provenance false \
        --builder=$(BUILDER) \
        --platform=$(PLATFORM) \
        --tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)) \
        --tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
        --cache-from type=registry,ref=$(REGISTRY)/kubevirt-csi-driver:latest \

@@ -61,8 +67,10 @@ image-kubevirt-csi-driver:

image-cluster-autoscaler:
    docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/cluster-autoscaler \
    docker buildx build images/cluster-autoscaler \
        --provenance false \
        --builder=$(BUILDER) \
        --platform=$(PLATFORM) \
        --tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)) \
        --tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
        --cache-from type=registry,ref=$(REGISTRY)/cluster-autoscaler:latest \
@@ -1,75 +1,200 @@

# Managed Kubernetes Service

## Overview
## Managed Kubernetes in Cozystack

The Managed Kubernetes Service offers a streamlined solution for efficiently managing server workloads. Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration. This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management.
Whenever you want to deploy a custom containerized application in Cozystack, it's best to deploy it to a managed Kubernetes cluster.

The Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method. Additionally, it ensures seamless scaling across a multitude of servers, addressing the challenges posed by complex and outdated APIs found in traditional virtualization platforms. This managed service eliminates the need for developing custom solutions or modifying source code, saving valuable time and effort.
Cozystack deploys and manages Kubernetes-as-a-service as standalone applications within each tenant’s isolated environment.
In Cozystack, such clusters are named tenant Kubernetes clusters, while the base Cozystack cluster is called a management or root cluster.
Tenant clusters are fully separated from the management cluster and are intended for deploying tenant-specific or customer-developed applications.

## Deployment Details
Within a tenant cluster, users can take advantage of LoadBalancer services and easily provision physical volumes as needed.
The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.

The managed Kubernetes service deploys a standard Kubernetes cluster utilizing the Cluster API, Kamaji as the control-plane provider, and the KubeVirt infrastructure provider. This ensures a consistent and reliable setup for workloads.
## Why Use a Managed Kubernetes Cluster?

Within this cluster, users can take advantage of LoadBalancer services and easily provision physical volumes as needed. The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.
Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration.
This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management.

- Docs: https://github.com/clastix/kamaji
- Docs: https://cluster-api.sigs.k8s.io/
- GitHub: https://github.com/clastix/kamaji
- GitHub: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt
- GitHub: https://github.com/kubevirt/csi-driver
Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method.
Additionally, it ensures seamless scaling across a multitude of servers,
addressing the challenges posed by complex and outdated APIs found in traditional virtualization platforms.
This managed service eliminates the need for developing custom solutions or modifying source code, saving valuable time and effort.

The Managed Kubernetes Service in Cozystack offers a streamlined solution for efficiently managing server workloads.
## How-Tos
## Starting Work

How to access the deployed cluster:
Once the tenant Kubernetes cluster is ready, you can get a kubeconfig file to work with it.
It can be done via UI or a `kubectl` request:

```
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```
- Open the Cozystack dashboard, switch to your tenant, find and open the application page. Copy one of the config files from the **Secrets** section.
- Run the following command (using the management cluster kubeconfig):

```bash
kubectl get secret -n tenant-<name> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "admin.conf" | base64decode) }}' > admin.conf
```

There are several kubeconfig options available:

- `admin.conf` — The standard kubeconfig for accessing your new cluster.
You can create additional Kubernetes users using this configuration.
- `admin.svc` — Same token as `admin.conf`, but with the API server address set to the internal service name.
Use it for applications running inside the cluster that need API access.
- `super-admin.conf` — Similar to `admin.conf`, but with extended administrative permissions.
Intended for troubleshooting and cluster maintenance tasks.
- `super-admin.svc` — Same as `super-admin.conf`, but pointing to the internal API server address.
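
Once saved, the kubeconfig works like any other. A minimal check, assuming the file was saved as `admin.conf` as in the command above (the workload names are illustrative):

```bash
# Point kubectl at the tenant cluster using the downloaded kubeconfig.
export KUBECONFIG=$PWD/admin.conf

# List the worker nodes that have joined the tenant cluster.
kubectl get nodes -o wide

# Deploy a test workload; LoadBalancer services are supported in tenant clusters.
kubectl create deployment hello --image=nginx
kubectl expose deployment hello --port=80 --type=LoadBalancer
```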
## Implementation Details

A tenant Kubernetes cluster in Cozystack is essentially Kubernetes-in-Kubernetes.
Deploying it involves the following components:

- **Kamaji Control Plane**: [Kamaji](https://kamaji.clastix.io/) is an open-source project that facilitates the deployment
of Kubernetes control planes as pods within a root cluster.
Each control plane pod includes essential components like `kube-apiserver`, `controller-manager`, and `scheduler`,
allowing for efficient multi-tenancy and resource utilization.

- **Etcd Cluster**: A dedicated etcd cluster is deployed using Ænix's [etcd-operator](https://github.com/aenix-io/etcd-operator).
It provides reliable and scalable key-value storage for the Kubernetes control plane.

- **Worker Nodes**: Virtual Machines are provisioned to serve as worker nodes using KubeVirt.
These nodes are configured to join the tenant Kubernetes cluster, enabling the deployment and management of workloads.

- **Cluster API**: Cozystack uses the [Kubernetes Cluster API](https://cluster-api.sigs.k8s.io/) to provision the components of a cluster.

This architecture ensures isolated, scalable, and efficient tenant Kubernetes environments.

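Because provisioning is driven by Cluster API, a tenant cluster's state can be inspected from the management cluster using standard Cluster API resources. A minimal sketch, assuming a tenant namespace named `tenant-example` (the namespace name is illustrative):

```bash
# Run these against the management (root) cluster.
# Cluster API objects describe the tenant cluster and its machines:
kubectl -n tenant-example get clusters.cluster.x-k8s.io
kubectl -n tenant-example get machines.cluster.x-k8s.io

# The worker nodes behind those machines are KubeVirt virtual machines:
kubectl -n tenant-example get virtualmachines.kubevirt.io
```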
See the reference for components utilized in this service:

- [Kamaji Control Plane](https://kamaji.clastix.io)
- [Kamaji — Cluster API](https://kamaji.clastix.io/cluster-api/)
- [github.com/clastix/kamaji](https://github.com/clastix/kamaji)
- [KubeVirt](https://kubevirt.io/)
- [github.com/kubevirt/kubevirt](https://github.com/kubevirt/kubevirt)
- [github.com/aenix-io/etcd-operator](https://github.com/aenix-io/etcd-operator)
- [Kubernetes Cluster API](https://cluster-api.sigs.k8s.io/)
- [github.com/kubernetes-sigs/cluster-api-provider-kubevirt](https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt)
- [github.com/kubevirt/csi-driver](https://github.com/kubevirt/csi-driver)

## Parameters

### Common parameters
### Common Parameters

| Name | Description | Value |
| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
| `storageClass` | StorageClass used to store user data | `replicated` |
| `nodeGroups` | nodeGroups configuration | `{}` |
| Name | Description | Value |
| ----------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ------------ |
| `host` | Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty. | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components. | `2` |
| `storageClass` | StorageClass used to store user data. | `replicated` |
| `useCustomSecretForPatchContainerd` | If true, the secret `{{ .Release.Name }}-patch-containerd` is used to patch the containerd registry configuration. | `false` |
| `nodeGroups` | nodeGroups configuration | `{}` |

### Cluster Addons

| Name | Description | Value |
| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| `addons.certManager.enabled` | Enables the cert-manager | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
| `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
| Name | Description | Value |
| --------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` |
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. | `[]` |
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enable FluxCD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |

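For example, a values fragment enabling a few of these addons could look like the sketch below; the flags come from the table above, while the domain name is illustrative:

```yaml
addons:
  certManager:
    enabled: true
  ingressNginx:
    enabled: true
    # Domain names the parent cluster should route to this tenant cluster.
    hosts:
      - app.example.org
  monitoringAgents:
    enabled: true
```
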
### Kubernetes control plane configuration
### Kubernetes Control Plane Configuration

| Name | Description | Value |
| -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `controlPlane.apiServer.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resources` | Resources | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.konnectivity.server.resources` | Resources | `{}` |
| Name | Description | Value |
| -------------------------------------------------- | ------------------------------------------------------------------------------ | -------- |
| `controlPlane.apiServer.resources` | Explicit CPU/memory resource requests and limits for the API server. | `{}` |
| `controlPlane.apiServer.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `medium` |
| `controlPlane.controllerManager.resources` | Explicit CPU/memory resource requests and limits for the controller manager. | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
| `controlPlane.scheduler.resources` | Explicit CPU/memory resource requests and limits for the scheduler. | `{}` |
| `controlPlane.scheduler.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
| `controlPlane.konnectivity.server.resources` | Explicit CPU/memory resource requests and limits for the Konnectivity server. | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |

In production environments, it's recommended to set `resources` explicitly.
Example of `controlPlane.*.resources`:

## U Series
```yaml
resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi
```

Allowed values for `controlPlane.*.resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
This value is ignored if the corresponding `resources` value is set.

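Presets and explicit resources can be combined per component. A short sketch of the corresponding values, using only parameters from the tables above:

```yaml
controlPlane:
  replicas: 2
  apiServer:
    # Explicit resources take precedence; resourcesPreset is then ignored.
    resources:
      requests:
        cpu: 100m
        memory: 512Mi
      limits:
        cpu: 4000m
        memory: 4Gi
  controllerManager:
    resourcesPreset: micro
  scheduler:
    resourcesPreset: micro
```
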
## Resources Reference

### instanceType Resources

The following instanceType resources are provided by Cozystack:

| Name | vCPUs | Memory |
|---------------|-------|--------|
| `cx1.2xlarge` | 8 | 16Gi |
| `cx1.4xlarge` | 16 | 32Gi |
| `cx1.8xlarge` | 32 | 64Gi |
| `cx1.large` | 2 | 4Gi |
| `cx1.medium` | 1 | 2Gi |
| `cx1.xlarge` | 4 | 8Gi |
| `gn1.2xlarge` | 8 | 32Gi |
| `gn1.4xlarge` | 16 | 64Gi |
| `gn1.8xlarge` | 32 | 128Gi |
| `gn1.xlarge` | 4 | 16Gi |
| `m1.2xlarge` | 8 | 64Gi |
| `m1.4xlarge` | 16 | 128Gi |
| `m1.8xlarge` | 32 | 256Gi |
| `m1.large` | 2 | 16Gi |
| `m1.xlarge` | 4 | 32Gi |
| `n1.2xlarge` | 16 | 32Gi |
| `n1.4xlarge` | 32 | 64Gi |
| `n1.8xlarge` | 64 | 128Gi |
| `n1.large` | 4 | 8Gi |
| `n1.medium` | 4 | 4Gi |
| `n1.xlarge` | 8 | 16Gi |
| `o1.2xlarge` | 8 | 32Gi |
| `o1.4xlarge` | 16 | 64Gi |
| `o1.8xlarge` | 32 | 128Gi |
| `o1.large` | 2 | 8Gi |
| `o1.medium` | 1 | 4Gi |
| `o1.micro` | 1 | 1Gi |
| `o1.nano` | 1 | 512Mi |
| `o1.small` | 1 | 2Gi |
| `o1.xlarge` | 4 | 16Gi |
| `rt1.2xlarge` | 8 | 32Gi |
| `rt1.4xlarge` | 16 | 64Gi |
| `rt1.8xlarge` | 32 | 128Gi |
| `rt1.large` | 2 | 8Gi |
| `rt1.medium` | 1 | 4Gi |
| `rt1.micro` | 1 | 1Gi |
| `rt1.small` | 1 | 2Gi |
| `rt1.xlarge` | 4 | 16Gi |
| `u1.2xlarge` | 8 | 32Gi |
| `u1.2xmedium` | 2 | 4Gi |
| `u1.4xlarge` | 16 | 64Gi |
| `u1.8xlarge` | 32 | 128Gi |
| `u1.large` | 2 | 8Gi |
| `u1.medium` | 1 | 4Gi |
| `u1.micro` | 1 | 1Gi |
| `u1.nano` | 1 | 512Mi |
| `u1.small` | 1 | 2Gi |
| `u1.xlarge` | 4 | 16Gi |

### U Series: Universal

The U Series is quite neutral and provides resources for
general purpose applications.
@@ -80,7 +205,7 @@ attitude towards workloads.
VMs of instance types will share physical CPU cores on a
time-slice basis with other VMs.

### U Series Characteristics
#### U Series Characteristics

Specific characteristics of this series are:
- *Burstable CPU performance* - The workload has a baseline compute
@@ -89,14 +214,14 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
noise per node.

## O Series
### O Series: Overcommitted

The O Series is based on the U Series, with the only difference
being that memory is overcommitted.

*O* is the abbreviation for "Overcommitted".

### UO Series Characteristics
#### O Series Characteristics

Specific characteristics of this series are:
- *Burstable CPU performance* - The workload has a baseline compute
@@ -107,7 +232,7 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
noise per node.

## CX Series
### CX Series: Compute Exclusive

The CX Series provides exclusive compute resources for compute
intensive applications.
@@ -121,7 +246,7 @@ the IO threading from cores dedicated to the workload.
In addition, in this series, the NUMA topology of the used
cores is provided to the VM.

### CX Series Characteristics
#### CX Series Characteristics

Specific characteristics of this series are:
- *Hugepages* - Hugepages are used in order to improve memory
@@ -136,14 +261,14 @@ Specific characteristics of this series are:
optimize guest sided cache utilization.
- *vCPU-To-Memory Ratio (1:2)* - A vCPU-to-Memory ratio of 1:2.

## M Series
### M Series: Memory

The M Series provides resources for memory intensive
applications.

*M* is the abbreviation of "Memory".

### M Series Characteristics
#### M Series Characteristics

Specific characteristics of this series are:
- *Hugepages* - Hugepages are used in order to improve memory
@@ -154,7 +279,7 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:8)* - A vCPU-to-Memory ratio of 1:8, for much
less noise per node.

## RT Series
### RT Series: RealTime

The RT Series provides resources for realtime applications, like Oslat.

@@ -163,7 +288,7 @@ The RT Series provides resources for realtime applications, like Oslat.
This series of instance types requires nodes capable of running
realtime applications.

### RT Series Characteristics
#### RT Series Characteristics

Specific characteristics of this series are:
- *Hugepages* - Hugepages are used in order to improve memory
@@ -177,57 +302,3 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from
the medium size.

## Resources

The following instancetype resources are provided by Cozystack:

Name | vCPUs | Memory
-----|-------|-------
cx1.2xlarge | 8 | 16Gi
cx1.4xlarge | 16 | 32Gi
cx1.8xlarge | 32 | 64Gi
cx1.large | 2 | 4Gi
cx1.medium | 1 | 2Gi
cx1.xlarge | 4 | 8Gi
gn1.2xlarge | 8 | 32Gi
gn1.4xlarge | 16 | 64Gi
gn1.8xlarge | 32 | 128Gi
gn1.xlarge | 4 | 16Gi
m1.2xlarge | 8 | 64Gi
m1.4xlarge | 16 | 128Gi
m1.8xlarge | 32 | 256Gi
m1.large | 2 | 16Gi
m1.xlarge | 4 | 32Gi
n1.2xlarge | 16 | 32Gi
n1.4xlarge | 32 | 64Gi
n1.8xlarge | 64 | 128Gi
n1.large | 4 | 8Gi
n1.medium | 4 | 4Gi
n1.xlarge | 8 | 16Gi
o1.2xlarge | 8 | 32Gi
o1.4xlarge | 16 | 64Gi
o1.8xlarge | 32 | 128Gi
o1.large | 2 | 8Gi
o1.medium | 1 | 4Gi
o1.micro | 1 | 1Gi
o1.nano | 1 | 512Mi
o1.small | 1 | 2Gi
o1.xlarge | 4 | 16Gi
rt1.2xlarge | 8 | 32Gi
rt1.4xlarge | 16 | 64Gi
rt1.8xlarge | 32 | 128Gi
rt1.large | 2 | 8Gi
rt1.medium | 1 | 4Gi
rt1.micro | 1 | 1Gi
rt1.small | 1 | 2Gi
rt1.xlarge | 4 | 16Gi
u1.2xlarge | 8 | 32Gi
u1.2xmedium | 2 | 4Gi
u1.4xlarge | 16 | 64Gi
u1.8xlarge | 32 | 128Gi
u1.large | 2 | 8Gi
u1.medium | 1 | 4Gi
u1.micro | 1 | 1Gi
u1.nano | 1 | 512Mi
u1.small | 1 | 2Gi
u1.xlarge | 4 | 16Gi

packages/apps/kubernetes/charts/cozy-lib (symbolic link)
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.19.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.24.0@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e

@@ -1,7 +1,14 @@
# Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64
ARG builder_image=docker.io/library/golang:1.23.4
ARG BASEIMAGE=gcr.io/distroless/static:nonroot-amd64
ARG BASEIMAGE=gcr.io/distroless/static:nonroot-${TARGETARCH}

FROM ${builder_image} AS builder

ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH

RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \
&& cd /src/autoscaler/cluster-autoscaler \
&& git checkout cluster-autoscaler-1.32.0
@@ -14,6 +21,8 @@ RUN make build
FROM $BASEIMAGE
LABEL maintainer="Marcin Wielgus <mwielgus@google.com>"

COPY --from=builder /src/autoscaler/cluster-autoscaler/cluster-autoscaler-amd64 /cluster-autoscaler
ARG TARGETARCH

COPY --from=builder /src/autoscaler/cluster-autoscaler/cluster-autoscaler-${TARGETARCH} /cluster-autoscaler
WORKDIR /
CMD ["/cluster-autoscaler"]

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.19.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.24.0@sha256:b478952fab735f85c3ba15835012b1de8af5578b33a8a2670eaf532ffc17681e

@@ -1,9 +1,14 @@
# Source: https://github.com/kubevirt/cloud-provider-kubevirt/blob/main/build/images/kubevirt-cloud-controller-manager/Dockerfile
FROM --platform=linux/amd64 golang:1.20.6 AS builder
FROM golang:1.20.6 AS builder

ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH

RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
&& cd /go/src/kubevirt.io/cloud-provider-kubevirt \
&& git checkout 443a1fe
&& git checkout a0acf33

WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt

@@ -14,7 +19,7 @@ RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'
RUN go mod tidy
RUN go mod vendor

RUN CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags="-s -w" -o bin/kubevirt-cloud-controller-manager ./cmd/kubevirt-cloud-controller-manager
RUN CGO_ENABLED=0 go build -mod=vendor -ldflags="-s -w" -o bin/kubevirt-cloud-controller-manager ./cmd/kubevirt-cloud-controller-manager

FROM registry.access.redhat.com/ubi9/ubi-micro
COPY --from=builder /go/src/kubevirt.io/cloud-provider-kubevirt/bin/kubevirt-cloud-controller-manager /bin/kubevirt-cloud-controller-manager

@@ -37,7 +37,7 @@ index 74166b5d9..4e744f8de 100644
klog.Infof("Initializing kubevirtEPSController")

diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 6f6e3d322..b56882c12 100644
index 53388eb8e..b56882c12 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -54,10 +54,10 @@ type Controller struct {
@@ -286,22 +286,6 @@ index 6f6e3d322..b56882c12 100644
for _, eps := range slicesToDelete {
err := c.infraClient.DiscoveryV1().EndpointSlices(eps.Namespace).Delete(context.TODO(), eps.Name, metav1.DeleteOptions{})
if err != nil {
@@ -474,11 +538,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
// Create the desired port configuration
var desiredPorts []discovery.EndpointPort

- for _, port := range service.Spec.Ports {
+ for i := range service.Spec.Ports {
desiredPorts = append(desiredPorts, discovery.EndpointPort{
- Port: &port.TargetPort.IntVal,
- Protocol: &port.Protocol,
- Name: &port.Name,
+ Port: &service.Spec.Ports[i].TargetPort.IntVal,
+ Protocol: &service.Spec.Ports[i].Protocol,
+ Name: &service.Spec.Ports[i].Name,
})
}

@@ -588,55 +652,114 @@ func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
return false
}
@@ -437,18 +421,10 @@ index 6f6e3d322..b56882c12 100644

return nil
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1fb86e25f..14d92d340 100644
index 1c97035b4..14d92d340 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/sets"
dfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"
@@ -189,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
@@ -190,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
}: "VirtualMachineInstanceList",
})

@@ -457,83 +433,87 @@ index 1fb86e25f..14d92d340 100644

err := controller.Init()
if err != nil {
@@ -686,5 +687,229 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
return false, err
}).Should(BeTrue(), "EndpointSlice in infra cluster should be recreated by the controller after deletion")
})
+
+ g.It("Should correctly handle multiple unique ports in EndpointSlice", func() {
+ // Create a VMI in the infra cluster
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+ // Create an EndpointSlice in the tenant cluster
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+ *createPort("http", 80, v1.ProtocolTCP),
+ []discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+
@@ -697,51 +697,43 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
*createPort("http", 80, v1.ProtocolTCP),
[]discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})

- // Define several unique ports for the Service
+ // Define multiple ports for the Service
+ servicePorts := []v1.ServicePort{
+ {
servicePorts := []v1.ServicePort{
{
- Name: "client",
- Protocol: v1.ProtocolTCP,
- Port: 10001,
- TargetPort: intstr.FromInt(30396),
- NodePort: 30396,
- AppProtocol: nil,
+ Name: "client",
+ Protocol: v1.ProtocolTCP,
+ Port: 10001,
+ TargetPort: intstr.FromInt(30396),
+ NodePort: 30396,
+ },
+ {
},
{
- Name: "dashboard",
- Protocol: v1.ProtocolTCP,
- Port: 8265,
- TargetPort: intstr.FromInt(31003),
- NodePort: 31003,
- AppProtocol: nil,
+ Name: "dashboard",
+ Protocol: v1.ProtocolTCP,
+ Port: 8265,
+ TargetPort: intstr.FromInt(31003),
+ NodePort: 31003,
+ },
+ {
},
{
- Name: "metrics",
- Protocol: v1.ProtocolTCP,
- Port: 8080,
- TargetPort: intstr.FromInt(30452),
- NodePort: 30452,
- AppProtocol: nil,
+ Name: "metrics",
+ Protocol: v1.ProtocolTCP,
+ Port: 8080,
+ TargetPort: intstr.FromInt(30452),
+ NodePort: 30452,
+ },
+ }
+
+ createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
},
}

- // Create a Service with the first port
createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
- servicePorts[0],
- v1.ServiceExternalTrafficPolicyLocal)
+ servicePorts[0], v1.ServiceExternalTrafficPolicyLocal)
+
+ svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
+ Expect(err).To(BeNil())
+
+ svc.Spec.Ports = servicePorts
+ _, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
+ Expect(err).To(BeNil())
+
+ var epsListMultiPort *discoveryv1.EndpointSliceList
+
+ Eventually(func() (bool, error) {
+ epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+ if len(epsListMultiPort.Items) != 1 {
+ return false, err
+ }
+
+ createdSlice := epsListMultiPort.Items[0]
+ expectedPortNames := []string{"client", "dashboard", "metrics"}
+ foundPortNames := []string{}
+
+ for _, port := range createdSlice.Ports {
+ if port.Name != nil {
+ foundPortNames = append(foundPortNames, *port.Name)
+ }
+ }
+
+ if len(foundPortNames) != len(expectedPortNames) {
+ return false, err
+ }
+
+ portSet := sets.NewString(foundPortNames...)
+ expectedPortSet := sets.NewString(expectedPortNames...)
+ return portSet.Equal(expectedPortSet), err
+ }).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
+ })
+

- // Update the Service by adding the remaining ports
svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
Expect(err).To(BeNil())

svc.Spec.Ports = servicePorts
-
_, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
Expect(err).To(BeNil())

var epsListMultiPort *discoveryv1.EndpointSliceList

- // Verify that the EndpointSlice is created with correct unique ports
Eventually(func() (bool, error) {
epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
if len(epsListMultiPort.Items) != 1 {
@@ -758,7 +750,6 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
}
}

- // Verify that all expected ports are present and without duplicates
if len(foundPortNames) != len(expectedPortNames) {
return false, err
}
@@ -769,5 +760,156 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
}).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
})

+ g.It("Should not panic when Service changes to have a non-nil selector, causing EndpointSlice deletion with no new slices to create", func() {
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,

@@ -0,0 +1,142 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 53388eb8e..28644236f 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -12,7 +12,6 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -669,35 +668,50 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di
for _, slice := range tenantSlices {
for _, endpoint := range slice.Endpoints {
// find all unique nodes that correspond to an endpoint in a tenant slice
+ if endpoint.NodeName == nil {
+ klog.Warningf("Skipping endpoint without NodeName in slice %s/%s", slice.Namespace, slice.Name)
+ continue
+ }
nodeSet.Insert(*endpoint.NodeName)
}
}

- klog.Infof("Desired nodes for service %s in namespace %s: %v", service.Name, service.Namespace, sets.List(nodeSet))
+ klog.Infof("Desired nodes for service %s/%s: %v", service.Namespace, service.Name, sets.List(nodeSet))

for _, node := range sets.List(nodeSet) {
// find vmi for node name
- obj := &unstructured.Unstructured{}
- vmi := &kubevirtv1.VirtualMachineInstance{}
-
- obj, err := c.infraDynamic.Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).Namespace(c.infraNamespace).Get(context.TODO(), node, metav1.GetOptions{})
+ obj, err := c.infraDynamic.
+ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).
+ Namespace(c.infraNamespace).
+ Get(context.TODO(), node, metav1.GetOptions{})
if err != nil {
- klog.Errorf("Failed to get VirtualMachineInstance %s in namespace %s:%v", node, c.infraNamespace, err)
+ klog.Errorf("Failed to get VMI %q in namespace %q: %v", node, c.infraNamespace, err)
continue
}

+ vmi := &kubevirtv1.VirtualMachineInstance{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, vmi)
if err != nil {
klog.Errorf("Failed to convert Unstructured to VirtualMachineInstance: %v", err)
- klog.Fatal(err)
+ continue
}

+ if vmi.Status.NodeName == "" {
+ klog.Warningf("Skipping VMI %s/%s: NodeName is empty", vmi.Namespace, vmi.Name)
+ continue
+ }
+ nodeNamePtr := &vmi.Status.NodeName
+
ready := vmi.Status.Phase == kubevirtv1.Running
serving := vmi.Status.Phase == kubevirtv1.Running
terminating := vmi.Status.Phase == kubevirtv1.Failed || vmi.Status.Phase == kubevirtv1.Succeeded

for _, i := range vmi.Status.Interfaces {
if i.Name == "default" {
+ if i.IP == "" {
+ klog.Warningf("VMI %s/%s interface %q has no IP, skipping", vmi.Namespace, vmi.Name, i.Name)
+ continue
+ }
desiredEndpoints = append(desiredEndpoints, &discovery.Endpoint{
Addresses: []string{i.IP},
Conditions: discovery.EndpointConditions{
@@ -705,9 +719,9 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di
Serving: &serving,
Terminating: &terminating,
},
- NodeName: &vmi.Status.NodeName,
+ NodeName: nodeNamePtr,
})
- continue
+ break
}
}
}
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1c97035b4..d205d0bed 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -771,3 +771,55 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {

})
})
+
+var _ = g.Describe("getDesiredEndpoints", func() {
+ g.It("should skip endpoints without NodeName and VMIs without NodeName or IP", func() {
+ // Setup controller
+ ctrl := setupTestKubevirtEPSController().controller
+
+ // Manually inject dynamic client content (1 VMI with missing NodeName)
+ vmi := createUnstructuredVMINode("vmi-without-node", "", "10.0.0.1") // empty NodeName
+ _, err := ctrl.infraDynamic.
+ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).
+ Namespace(infraNamespace).
+ Create(context.TODO(), vmi, metav1.CreateOptions{})
+ Expect(err).To(BeNil())
+
+ // Create service
+ svc := createInfraServiceLB("test-svc", "test-svc", "test-cluster",
+ v1.ServicePort{
+ Name: "http",
+ Port: 80,
+ TargetPort: intstr.FromInt(8080),
+ Protocol: v1.ProtocolTCP,
+ },
+ v1.ServiceExternalTrafficPolicyLocal,
+ )
+
+ // One endpoint has nil NodeName, another is valid
+ nodeName := "vmi-without-node"
+ tenantSlice := &discoveryv1.EndpointSlice{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "slice",
+ Namespace: tenantNamespace,
+ Labels: map[string]string{
+ discoveryv1.LabelServiceName: "test-svc",
+ },
+ },
+ AddressType: discoveryv1.AddressTypeIPv4,
+ Endpoints: []discoveryv1.Endpoint{
+ { // should be skipped due to nil NodeName
+ Addresses: []string{"10.1.1.1"},
+ NodeName: nil,
+ },
+ { // will hit VMI without NodeName and also be skipped
+ Addresses: []string{"10.1.1.2"},
+ NodeName: &nodeName,
+ },
+ },
+ }
+
+ endpoints := ctrl.getDesiredEndpoints(svc, []*discoveryv1.EndpointSlice{tenantSlice})
+ Expect(endpoints).To(HaveLen(0), "Expected no endpoints due to missing NodeName or IP")
+ })
+})
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.19.0@sha256:5717919c75e609902c6d67138311a2a8fd07be822e2173f3802b67cf5f3486e9
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.24.0@sha256:4d3728b2050d4e0adb00b9f4abbb4a020b29e1a39f24ca1447806fc81f110fa6

@@ -5,6 +5,11 @@ RUN git clone https://github.com/kubevirt/csi-driver /src/kubevirt-csi-driver \
&& cd /src/kubevirt-csi-driver \
&& git checkout 35836e0c8b68d9916d29a838ea60cdd3fc6199cf

ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH

WORKDIR /src/kubevirt-csi-driver
RUN make build


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:4a4f8bee150e04d1efcd5ff1ea83e12f495a98851cc5fd47ef41ac7aebce9b74
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:e53f2394c7aa76ad10818ffb945e40006cd77406999e47e036d41b8b0bf094cc

@@ -1,5 +1,5 @@
# TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04
FROM ubuntu:22.04 as guestfish
FROM ubuntu:22.04 AS guestfish

ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
@@ -8,15 +8,17 @@ RUN apt-get update \
linux-image-generic \
wget \
make \
bash-completion \
&& apt-get clean
bash-completion

WORKDIR /build

FROM guestfish as builder
FROM guestfish AS builder

ARG TARGETOS
ARG TARGETARCH

# noble is a code name for the Ubuntu 24.04 LTS release
RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-${TARGETARCH}.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null

ARG KUBERNETES_VERSION

@@ -29,19 +31,21 @@ RUN qemu-img resize image.img 5G \
&& guestfish --remote command "resize2fs /dev/sda1" \
# docker repo
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
&& guestfish --remote sh 'echo "deb [signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
# kubernetes repo
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
&& guestfish --remote command "apt-get check -q" \
# install containerd
&& guestfish --remote command "apt-get update -y" \
&& guestfish --remote command "apt-get install -y containerd.io" \
&& guestfish --remote command "apt-get update -q" \
&& guestfish --remote command "apt-get install -yq containerd.io" \
# configure containerd
&& guestfish --remote command "mkdir -p /etc/containerd" \
&& guestfish --remote sh "containerd config default | tee /etc/containerd/config.toml" \
&& guestfish --remote command "sed -i '/SystemdCgroup/ s/=.*/= true/' /etc/containerd/config.toml" \
&& guestfish --remote command "containerd config dump >/dev/null" \
# install kubernetes
&& guestfish --remote command "apt-get install -y kubelet kubeadm" \
&& guestfish --remote command "apt-get install -yq kubelet kubeadm" \
# clean apt cache
&& guestfish --remote sh 'apt-get clean && rm -rf /var/lib/apt/lists/*' \
# write system configuration

@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}

@@ -31,6 +31,16 @@ spec:
{{- end }}
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }}
spec:
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
{{- if $configMap }}
{{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
{{- if $rawConstraints }}
{{- $rawConstraints | fromYaml | toYaml | nindent 10 }}
labelSelector:
matchLabels:
cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
{{- end }}
{{- end }}
domain:
{{- if and .group.resources .group.resources.cpu }}
cpu:
@@ -111,21 +121,21 @@ metadata:
spec:
apiServer:
{{- if .Values.controlPlane.apiServer.resources }}
resources: {{- toYaml .Values.controlPlane.apiServer.resources | nindent 6 }}
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.apiServer.resources $) | nindent 6 }}
{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.apiServer.resourcesPreset $) | nindent 6 }}
{{- end }}
controllerManager:
{{- if .Values.controlPlane.controllerManager.resources }}
resources: {{- toYaml .Values.controlPlane.controllerManager.resources | nindent 6 }}
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.controllerManager.resources $) | nindent 6 }}
{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.controllerManager.resourcesPreset $) | nindent 6 }}
{{- end }}
scheduler:
{{- if .Values.controlPlane.scheduler.resources }}
resources: {{- toYaml .Values.controlPlane.scheduler.resources | nindent 6 }}
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.scheduler.resources $) | nindent 6 }}
{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.scheduler.resourcesPreset $) | nindent 6 }}
{{- end }}
dataStoreName: "{{ $etcd }}"
addons:
@@ -136,9 +146,9 @@ spec:
server:
port: 8132
{{- if .Values.controlPlane.konnectivity.server.resources }}
resources: {{- toYaml .Values.controlPlane.konnectivity.server.resources | nindent 10 }}
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.controlPlane.konnectivity.server.resources $) | nindent 10 }}
{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
resources: {{- include "cozy-lib.resources.preset" (list .Values.controlPlane.konnectivity.server.resourcesPreset $) | nindent 10 }}
{{- end }}
kubelet:
cgroupfs: systemd
@@ -150,14 +160,14 @@ spec:
ingress:
extraAnnotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
className: "{{ $ingress }}"
deployment:
podAdditionalMetadata:
labels:
policy.cozystack.io/allow-to-etcd: "true"
replicas: 2
version: 1.30.1
version: {{ $.Chart.AppVersion }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
@@ -201,12 +211,25 @@ spec:
- ["LABEL=ephemeral", "/ephemeral"]
- ["/ephemeral/kubelet", "/var/lib/kubelet", "none", "bind,nofail"]
- ["/ephemeral/containerd", "/var/lib/containerd", "none", "bind,nofail"]
{{- $sec := lookup "v1" "Secret" $.Release.Namespace (printf "%s-patch-containerd" $.Release.Name) }}
{{- if $sec }}
files:
{{- range $key, $_ := $sec.data }}
- path: /etc/containerd/certs.d/{{ trimSuffix ".toml" $key }}/hosts.toml
contentFrom:
secret:
name: {{ $.Release.Name }}-patch-containerd
key: {{ $key }}
permissions: "0400"
{{- end }}
{{- end }}
preKubeadmCommands:
- sed -i 's|root:x:|root::|' /etc/passwd
- systemctl stop containerd.service
- mkdir -p /ephemeral/kubelet /ephemeral/containerd
- mount -o bind /ephemeral/kubelet /var/lib/kubelet
- mount -o bind /ephemeral/containerd /var/lib/containerd
- sudo sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry\]/,/^\[/ s|^\(\s*config_path\s*=\s*\).*|\1"/etc/containerd/certs.d"|' /etc/containerd/config.toml
- systemctl start containerd.service
joinConfiguration:
nodeRegistration:
@@ -283,7 +306,7 @@ spec:
kind: KubevirtMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
namespace: {{ $.Release.Namespace }}
version: v1.32.3
version: v{{ $.Chart.AppVersion }}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck

@@ -0,0 +1,15 @@
{{- if not .Values.useCustomSecretForPatchContainerd }}
{{- $sourceSecret := lookup "v1" "Secret" "cozy-system" "patch-containerd" }}
{{- if $sourceSecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-patch-containerd
namespace: {{ .Release.Namespace }}
type: {{ $sourceSecret.type }}
data:
{{- range $key, $value := $sourceSecret.data }}
{{ printf "%s: %s" $key ($value | quote) | indent 2 }}
{{- end }}
{{- end }}
{{- end }}
@@ -34,3 +34,14 @@ rules:
- {{ $.Release.Name }}-{{ $groupName }}
{{- end }}
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-dashboard-resources
subjects:
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
roleRef:
kind: Role
name: {{ .Release.Name }}-dashboard-resources
apiGroup: rbac.authorization.k8s.io

@@ -1,3 +1,18 @@
{{- define "cozystack.defaultCiliumValues" -}}
cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
{{- if $.Values.addons.gatewayAPI.enabled }}
gatewayAPI:
enabled: true
envoy:
enabled: true
{{- end }}
{{- end }}

apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
@@ -31,14 +46,13 @@ spec:
remediation:
retries: -1
values:
cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
{{- toYaml (deepCopy .Values.addons.cilium.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultCiliumValues" .))) | nindent 4 }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if $.Values.addons.gatewayAPI.enabled }}
- name: {{ .Release.Name }}-gateway-api-crds
namespace: {{ .Release.Namespace }}
{{- end }}

@@ -30,6 +30,7 @@ spec:
patch
helmrelease
{{ .Release.Name }}-cilium
{{ .Release.Name }}-gateway-api-crds
{{ .Release.Name }}-csi
{{ .Release.Name }}-cert-manager
{{ .Release.Name }}-cert-manager-crds

@@ -0,0 +1,39 @@
{{- if $.Values.addons.gatewayAPI.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-gateway-api-crds
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: gateway-api-crds
chart:
spec:
chart: cozy-gateway-api-crds
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
targetNamespace: kube-system
storageNamespace: kube-system
install:
createNamespace: false
remediation:
retries: -1
upgrade:
remediation:
retries: -1
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
@@ -6,6 +6,11 @@ ingress-nginx:
hostNetwork: true
service:
enabled: false
{{- if not .Values.addons.certManager.enabled }}
admissionWebhooks:
certManager:
enabled: false
{{- end }}
nodeSelector:
node-role.kubernetes.io/ingress-nginx: ""
{{- end }}

@@ -1,4 +1,6 @@
{{- define "cozystack.defaultVPAValues" -}}
+{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
+{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
vertical-pod-autoscaler:
@@ -13,7 +15,7 @@ vertical-pod-autoscaler:
    metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
    pod-name-label: pod
    pod-namespace-label: namespace
-   prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
+   prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.{{ $clusterDomain }}:8481/select/0/prometheus/
    prometheus-cadvisor-job-name: cadvisor
  resources:
    limits:

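The cluster domain is now resolved at render time with Helm's `lookup` instead of the hardcoded `cozy.local`. A sketch of the ConfigMap it reads; the domain value is hypothetical:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: cozystack
      namespace: cozy-system
    data:
      cluster-domain: example.local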
@@ -4,7 +4,7 @@
  "properties": {
    "host": {
      "type": "string",
-     "description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
+     "description": "Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty.",
      "default": ""
    },
    "controlPlane": {
@@ -12,16 +12,21 @@
      "properties": {
        "replicas": {
          "type": "number",
-         "description": "Number of replicas for Kubernetes control-plane components",
+         "description": "Number of replicas for Kubernetes control-plane components.",
          "default": 2
        },
        "apiServer": {
          "type": "object",
          "properties": {
+           "resources": {
+             "type": "object",
+             "description": "Explicit CPU/memory resource requests and limits for the API server.",
+             "default": {}
+           },
            "resourcesPreset": {
              "type": "string",
-             "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
-             "default": "small",
+             "description": "Use a common resources preset when `resources` is not set explicitly.",
+             "default": "medium",
              "enum": [
                "none",
                "nano",
@@ -32,11 +37,6 @@
                "xlarge",
                "2xlarge"
              ]
            },
-           "resources": {
-             "type": "object",
-             "description": "Resources",
-             "default": {}
-           }
          }
        },
@@ -45,12 +45,12 @@
          "properties": {
            "resources": {
              "type": "object",
-             "description": "Resources",
+             "description": "Explicit CPU/memory resource requests and limits for the controller manager.",
              "default": {}
            },
            "resourcesPreset": {
              "type": "string",
-             "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+             "description": "Use a common resources preset when `resources` is not set explicitly.",
              "default": "micro",
              "enum": [
                "none",
@@ -68,9 +68,14 @@
        "scheduler": {
          "type": "object",
          "properties": {
+           "resources": {
+             "type": "object",
+             "description": "Explicit CPU/memory resource requests and limits for the scheduler.",
+             "default": {}
+           },
            "resourcesPreset": {
              "type": "string",
-             "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+             "description": "Use a common resources preset when `resources` is not set explicitly.",
              "default": "micro",
              "enum": [
                "none",
@@ -82,11 +87,6 @@
                "xlarge",
                "2xlarge"
              ]
            },
-           "resources": {
-             "type": "object",
-             "description": "Resources",
-             "default": {}
-           }
          }
        },
@@ -96,9 +96,14 @@
        "server": {
          "type": "object",
          "properties": {
+           "resources": {
+             "type": "object",
+             "description": "Explicit CPU/memory resource requests and limits for the Konnectivity server.",
+             "default": {}
+           },
            "resourcesPreset": {
              "type": "string",
-             "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+             "description": "Use a common resources preset when `resources` is not set explicitly.",
              "default": "micro",
              "enum": [
                "none",
@@ -110,11 +115,6 @@
                "xlarge",
                "2xlarge"
              ]
            },
-           "resources": {
-             "type": "object",
-             "description": "Resources",
-             "default": {}
-           }
          }
        }
@@ -124,9 +124,14 @@
    },
    "storageClass": {
      "type": "string",
-     "description": "StorageClass used to store user data",
+     "description": "StorageClass used to store user data.",
      "default": "replicated"
    },
+   "useCustomSecretForPatchContainerd": {
+     "type": "boolean",
+     "description": "If true, the secret {{ .Release.Name }}-patch-containerd is used to patch containerd.",
+     "default": false
+   },
    "addons": {
      "type": "object",
      "properties": {
@@ -135,7 +140,7 @@
        "properties": {
          "enabled": {
            "type": "boolean",
-           "description": "Enables the cert-manager",
+           "description": "Enable cert-manager, which automatically creates and manages SSL/TLS certificates.",
            "default": false
          },
          "valuesOverride": {
@@ -145,12 +150,32 @@
          }
        }
      },
+     "cilium": {
+       "type": "object",
+       "properties": {
+         "valuesOverride": {
+           "type": "object",
+           "description": "Custom values to override",
+           "default": {}
+         }
+       }
+     },
+     "gatewayAPI": {
+       "type": "object",
+       "properties": {
+         "enabled": {
+           "type": "boolean",
+           "description": "Enable the Gateway API",
+           "default": false
+         }
+       }
+     },
      "ingressNginx": {
        "type": "object",
        "properties": {
          "enabled": {
            "type": "boolean",
-           "description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
+           "description": "Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role).",
            "default": false
          },
          "valuesOverride": {
@@ -160,7 +185,7 @@
        },
        "hosts": {
          "type": "array",
-         "description": "List of domain names that should be passed through to the cluster by upper cluster",
+         "description": "List of domain names that the parent cluster should route to this tenant cluster.",
          "default": [],
          "items": {}
        }
@@ -171,7 +196,7 @@
        "properties": {
          "enabled": {
            "type": "boolean",
-           "description": "Enables the gpu-operator",
+           "description": "Enable the GPU-operator",
            "default": false
          },
          "valuesOverride": {
@@ -186,7 +211,7 @@
        "properties": {
          "enabled": {
            "type": "boolean",
-           "description": "Enables Flux CD",
+           "description": "Enable FluxCD",
            "default": false
          },
          "valuesOverride": {
@@ -201,7 +226,7 @@
        "properties": {
          "enabled": {
            "type": "boolean",
-           "description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
+           "description": "Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage.",
            "default": false
          },
          "valuesOverride": {

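As both the old and new descriptions note, an explicit `resources` object takes precedence over `resourcesPreset`. A short sketch of the two styles in cluster values; the numbers are illustrative, not defaults:

    controlPlane:
      apiServer:
        resourcesPreset: medium    # used only while resources stays empty
      scheduler:
        resources:                 # explicit values win over the preset
          requests:
            cpu: 100m
            memory: 512Mi
          limits:
            cpu: 500m
            memory: 1Gi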
@@ -1,11 +1,13 @@
-## @section Common parameters
+## @section Common Parameters

-## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
-## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
-## @param storageClass StorageClass used to store user data
+## @param host Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty.
+## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components.
+## @param storageClass StorageClass used to store user data.
+## @param useCustomSecretForPatchContainerd If true, the secret {{ .Release.Name }}-patch-containerd is used to patch containerd.
##
host: ""
storageClass: replicated
+useCustomSecretForPatchContainerd: false

## @param nodeGroups [object] nodeGroups configuration
##
@@ -37,19 +39,31 @@ addons:
  ## Cert-manager: automatically creates and manages SSL/TLS certificates
  ##
  certManager:
-   ## @param addons.certManager.enabled Enables the cert-manager
+   ## @param addons.certManager.enabled Enable cert-manager, which automatically creates and manages SSL/TLS certificates.
    ## @param addons.certManager.valuesOverride Custom values to override
    enabled: false
    valuesOverride: {}

+ ## Cilium CNI plugin
+ ##
+ cilium:
+   ## @param addons.cilium.valuesOverride Custom values to override
+   valuesOverride: {}
+
+ ## Gateway API
+ ##
+ gatewayAPI:
+   ## @param addons.gatewayAPI.enabled Enable the Gateway API
+   enabled: false
+
  ## Ingress-NGINX Controller
  ##
  ingressNginx:
-   ## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
+   ## @param addons.ingressNginx.enabled Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role).
    ## @param addons.ingressNginx.valuesOverride Custom values to override
    ##
    enabled: false
-   ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
+   ## @param addons.ingressNginx.hosts List of domain names that the parent cluster should route to this tenant cluster.
    ## e.g:
    ## hosts:
    ## - example.org
@@ -61,7 +75,7 @@ addons:
  ## GPU-operator: NVIDIA GPU Operator
  ##
  gpuOperator:
-   ## @param addons.gpuOperator.enabled Enables the gpu-operator
+   ## @param addons.gpuOperator.enabled Enable the GPU-operator
    ## @param addons.gpuOperator.valuesOverride Custom values to override
    enabled: false
    valuesOverride: {}
@@ -69,7 +83,7 @@ addons:
  ## Flux CD
  ##
  fluxcd:
-   ## @param addons.fluxcd.enabled Enables Flux CD
+   ## @param addons.fluxcd.enabled Enable FluxCD
    ## @param addons.fluxcd.valuesOverride Custom values to override
    ##
    enabled: false
@@ -78,7 +92,7 @@ addons:
  ## MonitoringAgents
  ##
  monitoringAgents:
-   ## @param addons.monitoringAgents.enabled Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage
+   ## @param addons.monitoringAgents.enabled Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage.
    ## @param addons.monitoringAgents.valuesOverride Custom values to override
    ##
    enabled: false
@@ -91,15 +105,15 @@ addons:
  ##
  valuesOverride: {}

-## @section Kubernetes control plane configuration
+## @section Kubernetes Control Plane Configuration
##

controlPlane:
  replicas: 2

  apiServer:
-   ## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-   ## @param controlPlane.apiServer.resources Resources
+   ## @param controlPlane.apiServer.resources Explicit CPU/memory resource requests and limits for the API server.
+   ## @param controlPlane.apiServer.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
    ## e.g:
    ## resources:
    ##   limits:
@@ -109,24 +123,24 @@ controlPlane:
    ##     cpu: 100m
    ##     memory: 512Mi
    ##
-   resourcesPreset: "small"
+   resourcesPreset: "medium"
    resources: {}

  controllerManager:
-   ## @param controlPlane.controllerManager.resources Resources
-   ## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+   ## @param controlPlane.controllerManager.resources Explicit CPU/memory resource requests and limits for the controller manager.
+   ## @param controlPlane.controllerManager.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
    resourcesPreset: "micro"
    resources: {}

  scheduler:
-   ## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-   ## @param controlPlane.scheduler.resources Resources
+   ## @param controlPlane.scheduler.resources Explicit CPU/memory resource requests and limits for the scheduler.
+   ## @param controlPlane.scheduler.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
    resourcesPreset: "micro"
    resources: {}

  konnectivity:
    server:
-     ## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-     ## @param controlPlane.konnectivity.server.resources Resources
+     ## @param controlPlane.konnectivity.server.resources Explicit CPU/memory resource requests and limits for the Konnectivity server.
+     ## @param controlPlane.konnectivity.server.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
      resourcesPreset: "micro"
      resources: {}

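Taken together, a minimal cluster values file exercising the new knobs might look like this; every key comes from this chart, and the chosen values are illustrative:

    host: ""
    storageClass: replicated
    useCustomSecretForPatchContainerd: false
    addons:
      cilium:
        valuesOverride: {}
      gatewayAPI:
        enabled: true
      ingressNginx:
        enabled: false
    controlPlane:
      replicas: 2
      apiServer:
        resourcesPreset: medium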
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.8.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -7,8 +7,10 @@ generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md

image:
-	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/mariadb-backup \
+	docker buildx build images/mariadb-backup \
+		--provenance false \
+		--builder=$(BUILDER) \
+		--platform=$(PLATFORM) \
		--tag $(REGISTRY)/mariadb-backup:$(call settag,$(MARIADB_BACKUP_TAG)) \
		--cache-from type=registry,ref=$(REGISTRY)/mariadb-backup:latest \
		--cache-to type=inline \

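The image target now takes the builder, platform, and registry from Make variables instead of hardcoding amd64, e.g. `make image BUILDER=mybuilder PLATFORM=linux/amd64,linux/arm64 REGISTRY=ghcr.io/cozystack/cozystack` (the builder name and platform list here are hypothetical examples, not defaults from this repo).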
packages/apps/mysql/charts/cozy-lib (symbolic link)
@@ -0,0 +1 @@
+../../../library/cozy-lib

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/mariadb-backup:0.6.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
+ghcr.io/cozystack/cozystack/mariadb-backup:0.8.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4

@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
-{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-   "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
+   "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-   "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
+   "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-   "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
+   "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
-   "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
-   "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
+   "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
+   "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
-   "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
-   "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
+   "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
+   "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
-   "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
-   "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
+   "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
+   "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
-   "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
-   "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
+   "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
+   "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}

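Under the revised table every preset pins the memory limit to the request and drops CPU limits entirely, leaving CPU burstable. For example, `include "resources.preset" (dict "type" "small")` should now render approximately:

    requests:
      cpu: 500m
      memory: 512Mi
      ephemeral-storage: 50Mi
    limits:
      memory: 512Mi
      ephemeral-storage: 2Gi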
@@ -25,3 +25,14 @@ rules:
  resourceNames:
    - {{ .Release.Name }}
  verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io

@@ -57,6 +57,11 @@ spec:
        name: {{ .Release.Name }}-my-cnf
        key: config

+ service:
+   metadata:
+     labels:
+       app.kubernetes.io/instance: {{ $.Release.Name }}
+
  storage:
    size: {{ .Values.size }}
    resizeInUseVolumes: true
@@ -74,7 +79,7 @@ spec:
  #   type: LoadBalancer

  {{- if .Values.resources }}
- resources: {{- toYaml .Values.resources | nindent 4 }}
+ resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
  {{- else if ne .Values.resourcesPreset "none" }}
- resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
+ resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
  {{- end }}

Some files were not shown because too many files have changed in this diff.