mirror of
https://github.com/cozystack/cozystack.git
synced 2026-03-03 13:38:56 +00:00
Compare commits
228 Commits
fix/monito
...
feat/node-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ba823b0c06 | ||
|
|
7ff5b2ba23 | ||
|
|
199ffe319a | ||
|
|
681b2cef54 | ||
|
|
5aa53d14c8 | ||
|
|
daf1b71e7c | ||
|
|
0198c9896a | ||
|
|
78f8ee2deb | ||
|
|
87d0390256 | ||
|
|
96467cdefd | ||
|
|
bff5468b52 | ||
|
|
3b267d6882 | ||
|
|
e7ffc21743 | ||
|
|
5bafdfd453 | ||
|
|
3e1981bc24 | ||
|
|
7ac989923d | ||
|
|
affd91dd41 | ||
|
|
26178d97be | ||
|
|
efb9bc70b3 | ||
|
|
0e6ae28bb8 | ||
|
|
0f2ba5aba2 | ||
|
|
490faaf292 | ||
|
|
cea57f62c8 | ||
|
|
c815725bcf | ||
|
|
6c447b2fcb | ||
|
|
0c85639fed | ||
|
|
2dd3c03279 | ||
|
|
305495d023 | ||
|
|
543ce6e5fd | ||
|
|
09805ff382 | ||
|
|
92d261fc1e | ||
|
|
9eb13fdafe | ||
|
|
cecc5861af | ||
|
|
abd644122f | ||
|
|
3fbce0dba5 | ||
|
|
b3fe6a8c4a | ||
|
|
962f8e96f4 | ||
|
|
20d122445d | ||
|
|
4187b5ed94 | ||
|
|
1558fb428a | ||
|
|
879b10b777 | ||
|
|
1ddbe68bc2 | ||
|
|
d8dd5adbe0 | ||
|
|
55cd8fc0e1 | ||
|
|
58dfc97201 | ||
|
|
153d2c48ae | ||
|
|
39b95107a5 | ||
|
|
b3b7307105 | ||
|
|
bf1e49d34b | ||
|
|
96ba3b9ca5 | ||
|
|
536766cffc | ||
|
|
8cc8e52d15 | ||
|
|
6c431d0857 | ||
|
|
c6090c554c | ||
|
|
8b7813fdeb | ||
|
|
a52da8dd8d | ||
|
|
315e5dc0bd | ||
|
|
75e25fa977 | ||
|
|
73b8946a7e | ||
|
|
f131eb109a | ||
|
|
961da56e96 | ||
|
|
bfba9fb5e7 | ||
|
|
bae70596fc | ||
|
|
84b2fa90dd | ||
|
|
7ca6e5ce9e | ||
|
|
e092047630 | ||
|
|
956d9cc2a0 | ||
|
|
ef040c2ed2 | ||
|
|
d658850578 | ||
|
|
b6dec6042d | ||
|
|
12fb9ce7dd | ||
|
|
cf505c580d | ||
|
|
9031de0538 | ||
|
|
8e8bea039f | ||
|
|
fa55b5f41f | ||
|
|
6098e2ac12 | ||
|
|
b170a9f4f9 | ||
|
|
d1cec6a4bd | ||
|
|
34cda28568 | ||
|
|
2bc5e01fda | ||
|
|
6b407bd403 | ||
|
|
b6a840e873 | ||
|
|
dbba5c325b | ||
|
|
ef48d74c5a | ||
|
|
13aa341a28 | ||
|
|
08a5f9890e | ||
|
|
783682f171 | ||
|
|
ee54495dfb | ||
|
|
5568c0be9f | ||
|
|
7251395663 | ||
|
|
b7e16aaa96 | ||
|
|
18352a5267 | ||
|
|
28985ed0a8 | ||
|
|
d279fc40cb | ||
|
|
88baceae2c | ||
|
|
62116030dc | ||
|
|
6576b3bb87 | ||
|
|
371f67276a | ||
|
|
0faab1fa98 | ||
|
|
ef208d1986 | ||
|
|
a9ab1a4ce8 | ||
|
|
574c636761 | ||
|
|
168f6f2445 | ||
|
|
46103400f2 | ||
|
|
1af999a500 | ||
|
|
78c31f72a9 | ||
|
|
8c5b39b258 | ||
|
|
a628adeb35 | ||
|
|
28ec04505c | ||
|
|
32b9a7749c | ||
|
|
9a86551e40 | ||
|
|
740eb7028b | ||
|
|
bce5300116 | ||
|
|
fb8157ef9b | ||
|
|
5ebf6d3f6a | ||
|
|
0260b15aaf | ||
|
|
4c7f7fafbc | ||
|
|
b8ccdedbf8 | ||
|
|
67f9818370 | ||
|
|
4e804e0f86 | ||
|
|
f61c8f9859 | ||
|
|
3971e9cb39 | ||
|
|
ba7c729066 | ||
|
|
5f27152d18 | ||
|
|
c54e55e070 | ||
|
|
a6a08d8224 | ||
|
|
1b25c72b6d | ||
|
|
36b2a19d3c | ||
|
|
2673624261 | ||
|
|
84010a8015 | ||
|
|
8d496d0f11 | ||
|
|
8387ea4d08 | ||
|
|
4a4c7c7ad5 | ||
|
|
6a054ee76c | ||
|
|
5bf481ae4d | ||
|
|
d5e713a4e7 | ||
|
|
e267cfcf9d | ||
|
|
c932740dc5 | ||
|
|
e978e00c7e | ||
|
|
9e47669f68 | ||
|
|
d4556e4c53 | ||
|
|
dd34fb581e | ||
|
|
3685d49c4e | ||
|
|
7c0e99e1af | ||
|
|
9f20771cf8 | ||
|
|
1cbf183164 | ||
|
|
87e394c0c9 | ||
|
|
8f015efc93 | ||
|
|
da359d558a | ||
|
|
ad24693ca3 | ||
|
|
c9d2b54917 | ||
|
|
593a8b2baa | ||
|
|
5bd298651b | ||
|
|
470d43b33e | ||
|
|
fdfb8e0608 | ||
|
|
45b223ce5d | ||
|
|
5c889124e7 | ||
|
|
9213abc260 | ||
|
|
17dea98ab2 | ||
|
|
fe90454755 | ||
|
|
534017abbf | ||
|
|
5fccc13226 | ||
|
|
33868e1daa | ||
|
|
24c8b9c7a5 | ||
|
|
f20c7c4890 | ||
|
|
c1c1171c96 | ||
|
|
bb39a0f73f | ||
|
|
6b7b6b9f29 | ||
|
|
06e7517108 | ||
|
|
939727b936 | ||
|
|
14235b2939 | ||
|
|
698e542af5 | ||
|
|
4b73afe137 | ||
|
|
494144fb92 | ||
|
|
e8ffbbb097 | ||
|
|
71f7ee0bab | ||
|
|
051889e761 | ||
|
|
f466530ea8 | ||
|
|
1d3deab3f3 | ||
|
|
0d27d3a034 | ||
|
|
8144b8232e | ||
|
|
ae6b615933 | ||
|
|
80cbe1ba96 | ||
|
|
0791e79004 | ||
|
|
b6bff2eaa3 | ||
|
|
ec86a03d40 | ||
|
|
6fea830d15 | ||
|
|
33322e5324 | ||
|
|
13d848efc3 | ||
|
|
cf2c6bc15f | ||
|
|
d82c4d46c5 | ||
|
|
4c9b1d5263 | ||
|
|
70518a78e6 | ||
|
|
8926283bde | ||
|
|
085b527f0c | ||
|
|
6542ab58eb | ||
|
|
4c50529365 | ||
|
|
8010dc5250 | ||
|
|
2b15e2899e | ||
|
|
74a8313d65 | ||
|
|
ffd97e581f | ||
|
|
27f1e79e32 | ||
|
|
c815dd46c7 | ||
|
|
55b90f5d9f | ||
|
|
906c09f3c0 | ||
|
|
1e293995de | ||
|
|
90ac6de475 | ||
|
|
330cbe70d4 | ||
|
|
b71e4fe956 | ||
|
|
0a3a38c3b6 | ||
|
|
69c0392dc6 | ||
|
|
3c75e88190 | ||
|
|
90a5d534cf | ||
|
|
976b0011ac | ||
|
|
d19b008bba | ||
|
|
000b5ff76c | ||
|
|
3eaadfc95c | ||
|
|
c2a5572574 | ||
|
|
75a4b8ecbd | ||
|
|
919e70d184 | ||
|
|
337ee88170 | ||
|
|
48a61bbae8 | ||
|
|
3e0217bbba | ||
|
|
8e210044f6 | ||
|
|
5e15a75d89 | ||
|
|
01d01cf351 | ||
|
|
4494b6a111 | ||
|
|
656e00d182 |
2
.github/CODEOWNERS
vendored
2
.github/CODEOWNERS
vendored
@@ -1 +1 @@
|
||||
* @kvaps @lllamnyp @lexfrei @androndo
|
||||
* @kvaps @lllamnyp @lexfrei @androndo @IvanHunters
|
||||
|
||||
42
.github/workflows/pull-requests.yaml
vendored
42
.github/workflows/pull-requests.yaml
vendored
@@ -71,18 +71,6 @@ jobs:
|
||||
name: pr-patch
|
||||
path: _out/assets/pr.patch
|
||||
|
||||
- name: Upload CRDs
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: cozystack-crds
|
||||
path: _out/assets/cozystack-crds.yaml
|
||||
|
||||
- name: Upload operator
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: cozystack-operator
|
||||
path: _out/assets/cozystack-operator.yaml
|
||||
|
||||
- name: Upload Talos image
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
@@ -94,8 +82,6 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
if: contains(github.event.pull_request.labels.*.name, 'release')
|
||||
outputs:
|
||||
crds_id: ${{ steps.fetch_assets.outputs.crds_id }}
|
||||
operator_id: ${{ steps.fetch_assets.outputs.operator_id }}
|
||||
disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
|
||||
|
||||
steps:
|
||||
@@ -139,21 +125,17 @@ jobs:
|
||||
return;
|
||||
}
|
||||
const find = (n) => draft.assets.find(a => a.name === n)?.id;
|
||||
const crdsId = find('cozystack-crds.yaml');
|
||||
const operatorId = find('cozystack-operator.yaml');
|
||||
const diskId = find('nocloud-amd64.raw.xz');
|
||||
if (!crdsId || !operatorId || !diskId) {
|
||||
if (!diskId) {
|
||||
core.setFailed('Required assets missing in draft release');
|
||||
return;
|
||||
}
|
||||
core.setOutput('crds_id', crdsId);
|
||||
core.setOutput('operator_id', operatorId);
|
||||
core.setOutput('disk_id', diskId);
|
||||
|
||||
|
||||
e2e:
|
||||
name: "E2E Tests"
|
||||
runs-on: [oracle-vm-24cpu-96gb-x86-64]
|
||||
runs-on: ${{ contains(github.event.pull_request.labels.*.name, 'debug') && 'self-hosted' || 'oracle-vm-24cpu-96gb-x86-64' }}
|
||||
#runs-on: [oracle-vm-32cpu-128gb-x86-64]
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -174,20 +156,6 @@ jobs:
|
||||
name: talos-image
|
||||
path: _out/assets
|
||||
|
||||
- name: "Download CRDs (regular PR)"
|
||||
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: cozystack-crds
|
||||
path: _out/assets
|
||||
|
||||
- name: "Download operator (regular PR)"
|
||||
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: cozystack-operator
|
||||
path: _out/assets
|
||||
|
||||
- name: Download PR patch
|
||||
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
|
||||
uses: actions/download-artifact@v4
|
||||
@@ -208,12 +176,6 @@ jobs:
|
||||
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
|
||||
-o _out/assets/nocloud-amd64.raw.xz \
|
||||
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
|
||||
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
|
||||
-o _out/assets/cozystack-crds.yaml \
|
||||
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.crds_id }}"
|
||||
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
|
||||
-o _out/assets/cozystack-operator.yaml \
|
||||
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.operator_id }}"
|
||||
env:
|
||||
GH_PAT: ${{ secrets.GH_PAT }}
|
||||
|
||||
|
||||
157
.github/workflows/tags.yaml
vendored
157
.github/workflows/tags.yaml
vendored
@@ -213,3 +213,160 @@ jobs:
|
||||
} else {
|
||||
console.log(`PR already exists from ${head} to ${base}`);
|
||||
}
|
||||
|
||||
generate-changelog:
|
||||
name: Generate Changelog
|
||||
runs-on: [self-hosted]
|
||||
needs: [prepare-release]
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
if: needs.prepare-release.result == 'success'
|
||||
steps:
|
||||
- name: Parse tag
|
||||
id: tag
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const ref = context.ref.replace('refs/tags/', '');
|
||||
const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
|
||||
if (!m) {
|
||||
core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
|
||||
return;
|
||||
}
|
||||
const version = m[1] + (m[2] ?? '');
|
||||
|
||||
core.setOutput('version', version);
|
||||
core.setOutput('tag', ref);
|
||||
|
||||
- name: Checkout main branch
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: main
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
token: ${{ secrets.GH_PAT }}
|
||||
|
||||
- name: Check if changelog already exists
|
||||
id: check_changelog
|
||||
run: |
|
||||
CHANGELOG_FILE="docs/changelogs/v${{ steps.tag.outputs.version }}.md"
|
||||
if [ -f "$CHANGELOG_FILE" ]; then
|
||||
echo "exists=true" >> $GITHUB_OUTPUT
|
||||
echo "Changelog file $CHANGELOG_FILE already exists"
|
||||
else
|
||||
echo "exists=false" >> $GITHUB_OUTPUT
|
||||
echo "Changelog file $CHANGELOG_FILE does not exist"
|
||||
fi
|
||||
|
||||
- name: Setup Node.js
|
||||
if: steps.check_changelog.outputs.exists == 'false'
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
|
||||
- name: Install GitHub Copilot CLI
|
||||
if: steps.check_changelog.outputs.exists == 'false'
|
||||
run: npm i -g @github/copilot
|
||||
|
||||
- name: Generate changelog using AI
|
||||
if: steps.check_changelog.outputs.exists == 'false'
|
||||
env:
|
||||
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
|
||||
GH_TOKEN: ${{ secrets.GH_PAT }}
|
||||
run: |
|
||||
copilot --prompt "prepare changelog file for tagged release v${{ steps.tag.outputs.version }}, use @docs/agents/changelog.md for it. Create the changelog file at docs/changelogs/v${{ steps.tag.outputs.version }}.md" \
|
||||
--allow-all-tools --allow-all-paths < /dev/null
|
||||
|
||||
- name: Create changelog branch and commit
|
||||
if: steps.check_changelog.outputs.exists == 'false'
|
||||
env:
|
||||
GH_PAT: ${{ secrets.GH_PAT }}
|
||||
run: |
|
||||
git config user.name "cozystack-bot"
|
||||
git config user.email "217169706+cozystack-bot@users.noreply.github.com"
|
||||
git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
|
||||
|
||||
CHANGELOG_FILE="docs/changelogs/v${{ steps.tag.outputs.version }}.md"
|
||||
CHANGELOG_BRANCH="changelog-v${{ steps.tag.outputs.version }}"
|
||||
|
||||
if [ -f "$CHANGELOG_FILE" ]; then
|
||||
# Fetch latest main branch
|
||||
git fetch origin main
|
||||
|
||||
# Delete local branch if it exists
|
||||
git branch -D "$CHANGELOG_BRANCH" 2>/dev/null || true
|
||||
|
||||
# Create and checkout new branch from main
|
||||
git checkout -b "$CHANGELOG_BRANCH" origin/main
|
||||
|
||||
# Add and commit changelog
|
||||
git add "$CHANGELOG_FILE"
|
||||
if git diff --staged --quiet; then
|
||||
echo "⚠️ No changes to commit (file may already be committed)"
|
||||
else
|
||||
git commit -m "docs: add changelog for v${{ steps.tag.outputs.version }}" -s
|
||||
echo "✅ Changelog committed to branch $CHANGELOG_BRANCH"
|
||||
fi
|
||||
|
||||
# Push the branch (force push to update if it exists)
|
||||
git push -f origin "$CHANGELOG_BRANCH"
|
||||
else
|
||||
echo "⚠️ Changelog file was not generated"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Create PR for changelog
|
||||
if: steps.check_changelog.outputs.exists == 'false'
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.GH_PAT }}
|
||||
script: |
|
||||
const version = '${{ steps.tag.outputs.version }}';
|
||||
const changelogBranch = `changelog-v${version}`;
|
||||
const baseBranch = 'main';
|
||||
|
||||
// Check if PR already exists
|
||||
const prs = await github.rest.pulls.list({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
head: `${context.repo.owner}:${changelogBranch}`,
|
||||
base: baseBranch,
|
||||
state: 'open'
|
||||
});
|
||||
|
||||
if (prs.data.length > 0) {
|
||||
const pr = prs.data[0];
|
||||
console.log(`PR #${pr.number} already exists for changelog branch ${changelogBranch}`);
|
||||
|
||||
// Update PR body with latest info
|
||||
const body = `This PR adds the changelog for release \`v${version}\`.\n\n✅ Changelog has been automatically generated in \`docs/changelogs/v${version}.md\`.`;
|
||||
await github.rest.pulls.update({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: pr.number,
|
||||
body: body
|
||||
});
|
||||
console.log(`Updated existing PR #${pr.number}`);
|
||||
} else {
|
||||
// Create new PR
|
||||
const pr = await github.rest.pulls.create({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
head: changelogBranch,
|
||||
base: baseBranch,
|
||||
title: `docs: add changelog for v${version}`,
|
||||
body: `This PR adds the changelog for release \`v${version}\`.\n\n✅ Changelog has been automatically generated in \`docs/changelogs/v${version}.md\`.`,
|
||||
draft: false
|
||||
});
|
||||
|
||||
// Add label if needed
|
||||
await github.rest.issues.addLabels({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: pr.data.number,
|
||||
labels: ['documentation', 'automated']
|
||||
});
|
||||
|
||||
console.log(`Created PR #${pr.data.number} for changelog`);
|
||||
}
|
||||
|
||||
24
Makefile
24
Makefile
@@ -1,4 +1,4 @@
|
||||
.PHONY: manifests assets unit-tests helm-unit-tests
|
||||
.PHONY: manifests assets unit-tests helm-unit-tests verify-crds
|
||||
|
||||
include hack/common-envs.mk
|
||||
|
||||
@@ -11,7 +11,7 @@ build-deps:
|
||||
|
||||
build: build-deps
|
||||
make -C packages/apps/http-cache image
|
||||
make -C packages/apps/mysql image
|
||||
make -C packages/apps/mariadb image
|
||||
make -C packages/apps/clickhouse image
|
||||
make -C packages/apps/kubernetes image
|
||||
make -C packages/system/monitoring image
|
||||
@@ -27,7 +27,6 @@ build: build-deps
|
||||
make -C packages/system/dashboard image
|
||||
make -C packages/system/metallb image
|
||||
make -C packages/system/kamaji image
|
||||
make -C packages/system/kilo image
|
||||
make -C packages/system/bucket image
|
||||
make -C packages/system/objectstorage-controller image
|
||||
make -C packages/system/grafana-operator image
|
||||
@@ -39,22 +38,23 @@ build: build-deps
|
||||
|
||||
manifests:
|
||||
mkdir -p _out/assets
|
||||
helm template installer packages/core/installer -n cozy-system \
|
||||
-s templates/crds.yaml \
|
||||
> _out/assets/cozystack-crds.yaml
|
||||
cat packages/core/installer/crds/*.yaml > _out/assets/cozystack-crds.yaml
|
||||
# Talos variant (default)
|
||||
helm template installer packages/core/installer -n cozy-system \
|
||||
-s templates/cozystack-operator.yaml \
|
||||
-s templates/packagesource.yaml \
|
||||
> _out/assets/cozystack-operator.yaml
|
||||
> _out/assets/cozystack-operator-talos.yaml
|
||||
# Generic Kubernetes variant (k3s, kubeadm, RKE2)
|
||||
helm template installer packages/core/installer -n cozy-system \
|
||||
-s templates/cozystack-operator-generic.yaml \
|
||||
--set cozystackOperator.variant=generic \
|
||||
--set cozystack.apiServerHost=REPLACE_ME \
|
||||
-s templates/cozystack-operator.yaml \
|
||||
-s templates/packagesource.yaml \
|
||||
> _out/assets/cozystack-operator-generic.yaml
|
||||
# Hosted variant (managed Kubernetes)
|
||||
helm template installer packages/core/installer -n cozy-system \
|
||||
-s templates/cozystack-operator-hosted.yaml \
|
||||
--set cozystackOperator.variant=hosted \
|
||||
-s templates/cozystack-operator.yaml \
|
||||
-s templates/packagesource.yaml \
|
||||
> _out/assets/cozystack-operator-hosted.yaml
|
||||
|
||||
@@ -80,7 +80,11 @@ test:
|
||||
make -C packages/core/testing apply
|
||||
make -C packages/core/testing test
|
||||
|
||||
unit-tests: helm-unit-tests
|
||||
verify-crds:
|
||||
@diff --recursive packages/core/installer/crds/ internal/crdinstall/manifests/ --exclude='.*' \
|
||||
|| (echo "ERROR: CRD manifests out of sync. Run 'make generate' to fix." && exit 1)
|
||||
|
||||
unit-tests: helm-unit-tests verify-crds
|
||||
|
||||
helm-unit-tests:
|
||||
hack/helm-unit-tests.sh
|
||||
|
||||
@@ -56,6 +56,8 @@ type VeleroSpec struct {
|
||||
// templated from a Velero backup strategy.
|
||||
type VeleroTemplate struct {
|
||||
Spec velerov1.BackupSpec `json:"spec"`
|
||||
// +optional
|
||||
RestoreSpec *velerov1.RestoreSpec `json:"restoreSpec,omitempty"`
|
||||
}
|
||||
|
||||
type VeleroStatus struct {
|
||||
|
||||
@@ -21,6 +21,7 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
@@ -223,6 +224,11 @@ func (in *VeleroStatus) DeepCopy() *VeleroStatus {
|
||||
func (in *VeleroTemplate) DeepCopyInto(out *VeleroTemplate) {
|
||||
*out = *in
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
if in.RestoreSpec != nil {
|
||||
in, out := &in.RestoreSpec, &out.RestoreSpec
|
||||
*out = new(velerov1.RestoreSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeleroTemplate.
|
||||
|
||||
@@ -196,7 +196,7 @@ type ApplicationSelector struct {
|
||||
// +optional
|
||||
APIGroup *string `json:"apiGroup,omitempty"`
|
||||
|
||||
// Kind is the kind of the application (e.g., VirtualMachine, MySQL).
|
||||
// Kind is the kind of the application (e.g., VirtualMachine, MariaDB).
|
||||
Kind string `json:"kind"`
|
||||
}
|
||||
```
|
||||
|
||||
@@ -73,7 +73,7 @@ type ApplicationSelector struct {
|
||||
// +optional
|
||||
APIGroup *string `json:"apiGroup,omitempty"`
|
||||
|
||||
// Kind is the kind of the application (e.g., VirtualMachine, MySQL).
|
||||
// Kind is the kind of the application (e.g., VirtualMachine, MariaDB).
|
||||
Kind string `json:"kind"`
|
||||
}
|
||||
|
||||
|
||||
@@ -57,6 +57,7 @@ type BackupJobSpec struct {
|
||||
// The BackupClass will be resolved to determine the appropriate strategy and storage
|
||||
// based on the ApplicationRef.
|
||||
// This field is immutable once the BackupJob is created.
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="backupClassName is immutable"
|
||||
BackupClassName string `json:"backupClassName"`
|
||||
}
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||
)
|
||||
|
||||
// SetupWebhookWithManager registers the BackupJob webhook with the manager.
|
||||
func SetupBackupJobWebhookWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewWebhookManagedBy(mgr).
|
||||
For(&BackupJob{}).
|
||||
Complete()
|
||||
}
|
||||
|
||||
// +kubebuilder:webhook:path=/mutate-backups-cozystack-io-v1alpha1-backupjob,mutating=true,failurePolicy=fail,sideEffects=None,groups=backups.cozystack.io,resources=backupjobs,verbs=create;update,versions=v1alpha1,name=mbackupjob.kb.io,admissionReviewVersions=v1
|
||||
|
||||
// Default implements webhook.Defaulter so a webhook will be registered for the type
|
||||
func (j *BackupJob) Default() {
|
||||
j.Spec.ApplicationRef = NormalizeApplicationRef(j.Spec.ApplicationRef)
|
||||
}
|
||||
|
||||
// +kubebuilder:webhook:path=/validate-backups-cozystack-io-v1alpha1-backupjob,mutating=false,failurePolicy=fail,sideEffects=None,groups=backups.cozystack.io,resources=backupjobs,verbs=create;update,versions=v1alpha1,name=vbackupjob.kb.io,admissionReviewVersions=v1
|
||||
|
||||
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
|
||||
func (j *BackupJob) ValidateCreate() (admission.Warnings, error) {
|
||||
logger := log.FromContext(context.Background())
|
||||
logger.Info("validating BackupJob creation", "name", j.Name, "namespace", j.Namespace)
|
||||
|
||||
// Validate that backupClassName is set
|
||||
if strings.TrimSpace(j.Spec.BackupClassName) == "" {
|
||||
return nil, fmt.Errorf("backupClassName is required and cannot be empty")
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
|
||||
func (j *BackupJob) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
|
||||
logger := log.FromContext(context.Background())
|
||||
logger.Info("validating BackupJob update", "name", j.Name, "namespace", j.Namespace)
|
||||
|
||||
oldJob, ok := old.(*BackupJob)
|
||||
if !ok {
|
||||
return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a BackupJob but got a %T", old))
|
||||
}
|
||||
|
||||
// Enforce immutability of backupClassName
|
||||
if oldJob.Spec.BackupClassName != j.Spec.BackupClassName {
|
||||
return nil, fmt.Errorf("backupClassName is immutable and cannot be changed from %q to %q", oldJob.Spec.BackupClassName, j.Spec.BackupClassName)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
|
||||
func (j *BackupJob) ValidateDelete() (admission.Warnings, error) {
|
||||
// No validation needed for deletion
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1,334 +0,0 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func TestBackupJob_ValidateCreate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
job *BackupJob
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "valid BackupJob with backupClassName",
|
||||
job: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "velero",
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "BackupJob with empty backupClassName should be rejected",
|
||||
job: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "",
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "backupClassName is required and cannot be empty",
|
||||
},
|
||||
{
|
||||
name: "BackupJob with whitespace-only backupClassName should be rejected",
|
||||
job: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: " ",
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "backupClassName is required and cannot be empty",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
warnings, err := tt.job.ValidateCreate()
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ValidateCreate() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if tt.wantErr && err != nil {
|
||||
if tt.errMsg != "" && err.Error() != tt.errMsg {
|
||||
t.Errorf("ValidateCreate() error message = %v, want %v", err.Error(), tt.errMsg)
|
||||
}
|
||||
}
|
||||
if warnings != nil && len(warnings) > 0 {
|
||||
t.Logf("ValidateCreate() warnings = %v", warnings)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupJob_ValidateUpdate(t *testing.T) {
|
||||
baseJob := &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "velero",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
old runtime.Object
|
||||
new *BackupJob
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "update with same backupClassName should succeed",
|
||||
old: baseJob,
|
||||
new: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "velero", // Same as old
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "update changing backupClassName should be rejected",
|
||||
old: baseJob,
|
||||
new: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "different-class", // Changed!
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "backupClassName is immutable and cannot be changed from \"velero\" to \"different-class\"",
|
||||
},
|
||||
{
|
||||
name: "update changing other fields but keeping backupClassName should succeed",
|
||||
old: baseJob,
|
||||
new: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"new-label": "value",
|
||||
},
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm2", // Changed application
|
||||
},
|
||||
BackupClassName: "velero", // Same as old
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "update when old backupClassName is empty should be rejected",
|
||||
old: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "", // Empty in old
|
||||
},
|
||||
},
|
||||
new: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "velero", // Setting it for the first time
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "backupClassName is immutable",
|
||||
},
|
||||
{
|
||||
name: "update changing from non-empty to different non-empty should be rejected",
|
||||
old: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "class-a",
|
||||
},
|
||||
},
|
||||
new: &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "class-b", // Changed from class-a
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "backupClassName is immutable and cannot be changed from \"class-a\" to \"class-b\"",
|
||||
},
|
||||
{
|
||||
name: "update with invalid old object type should be rejected",
|
||||
old: &corev1.Pod{ // Wrong type - will be cast to runtime.Object in test
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
new: baseJob,
|
||||
wantErr: true,
|
||||
errMsg: "expected a BackupJob but got a",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
warnings, err := tt.new.ValidateUpdate(tt.old)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ValidateUpdate() error = %v, wantErr %v", err, tt.wantErr)
|
||||
if err != nil {
|
||||
t.Logf("Error message: %v", err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
if tt.wantErr && err != nil {
|
||||
if tt.errMsg != "" {
|
||||
if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("ValidateUpdate() error message = %v, want contains %v", err.Error(), tt.errMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
if warnings != nil && len(warnings) > 0 {
|
||||
t.Logf("ValidateUpdate() warnings = %v", warnings)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupJob_ValidateDelete(t *testing.T) {
|
||||
job := &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "velero",
|
||||
},
|
||||
}
|
||||
|
||||
warnings, err := job.ValidateDelete()
|
||||
if err != nil {
|
||||
t.Errorf("ValidateDelete() should never return an error, got %v", err)
|
||||
}
|
||||
if warnings != nil && len(warnings) > 0 {
|
||||
t.Logf("ValidateDelete() warnings = %v", warnings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupJob_Default(t *testing.T) {
|
||||
job := &BackupJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: BackupJobSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
Kind: "VirtualMachine",
|
||||
Name: "vm1",
|
||||
},
|
||||
BackupClassName: "velero",
|
||||
},
|
||||
}
|
||||
|
||||
// Default() should not panic and should not modify the object
|
||||
originalClassName := job.Spec.BackupClassName
|
||||
job.Default()
|
||||
if job.Spec.BackupClassName != originalClassName {
|
||||
t.Errorf("Default() should not modify backupClassName, got %v, want %v", job.Spec.BackupClassName, originalClassName)
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to check if a string contains a substring
|
||||
func contains(s, substr string) bool {
|
||||
if len(substr) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(s) < len(substr) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i <= len(s)-len(substr); i++ {
|
||||
if s[i:i+len(substr)] == substr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -71,6 +71,8 @@ type RestoreJobStatus struct {
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",priority=0
|
||||
|
||||
// RestoreJob represents a single execution of a restore from a Backup.
|
||||
type RestoreJob struct {
|
||||
|
||||
@@ -253,3 +253,25 @@ type FactoryList struct {
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []Factory `json:"items"`
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// CustomFormsOverrideMapping
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:resource:path=cfomappings,scope=Cluster
|
||||
// +kubebuilder:subresource:status
|
||||
type CFOMapping struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ArbitrarySpec `json:"spec"`
|
||||
Status CommonStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
type CFOMappingList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []CFOMapping `json:"items"`
|
||||
}
|
||||
|
||||
@@ -69,6 +69,9 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
|
||||
&Factory{},
|
||||
&FactoryList{},
|
||||
|
||||
&CFOMapping{},
|
||||
&CFOMappingList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, GroupVersion)
|
||||
return nil
|
||||
|
||||
@@ -159,6 +159,65 @@ func (in *BreadcrumbList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CFOMapping) DeepCopyInto(out *CFOMapping) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CFOMapping.
|
||||
func (in *CFOMapping) DeepCopy() *CFOMapping {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CFOMapping)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *CFOMapping) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CFOMappingList) DeepCopyInto(out *CFOMappingList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]CFOMapping, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CFOMappingList.
|
||||
func (in *CFOMappingList) DeepCopy() *CFOMappingList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CFOMappingList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *CFOMappingList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CommonStatus) DeepCopyInto(out *CommonStatus) {
|
||||
*out = *in
|
||||
|
||||
@@ -37,10 +37,8 @@ import (
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
|
||||
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
|
||||
"github.com/cozystack/cozystack/internal/backupcontroller"
|
||||
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
@@ -53,8 +51,6 @@ func init() {
|
||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||
|
||||
utilruntime.Must(backupsv1alpha1.AddToScheme(scheme))
|
||||
utilruntime.Must(strategyv1alpha1.AddToScheme(scheme))
|
||||
utilruntime.Must(velerov1.AddToScheme(scheme))
|
||||
// +kubebuilder:scaffold:scheme
|
||||
}
|
||||
|
||||
@@ -166,21 +162,6 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&backupcontroller.BackupJobReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("backup-controller"),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "BackupJob")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Register BackupJob webhook for validation (immutability of backupClassName)
|
||||
if err = backupsv1alpha1.SetupBackupJobWebhookWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create webhook", "webhook", "BackupJob")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// +kubebuilder:scaffold:builder
|
||||
|
||||
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||
|
||||
@@ -37,8 +37,10 @@ import (
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
|
||||
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
|
||||
"github.com/cozystack/cozystack/internal/backupcontroller"
|
||||
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
@@ -51,6 +53,8 @@ func init() {
|
||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||
|
||||
utilruntime.Must(backupsv1alpha1.AddToScheme(scheme))
|
||||
utilruntime.Must(strategyv1alpha1.AddToScheme(scheme))
|
||||
utilruntime.Must(velerov1.AddToScheme(scheme))
|
||||
// +kubebuilder:scaffold:scheme
|
||||
}
|
||||
|
||||
@@ -155,10 +159,20 @@ func main() {
|
||||
}
|
||||
|
||||
if err = (&backupcontroller.BackupJobReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("backup-controller"),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "Job")
|
||||
setupLog.Error(err, "unable to create controller", "controller", "BackupJob")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&backupcontroller.RestoreJobReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Recorder: mgr.GetEventRecorderFor("restore-controller"),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "RestoreJob")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
||||
@@ -68,7 +68,6 @@ func main() {
|
||||
var disableTelemetry bool
|
||||
var telemetryEndpoint string
|
||||
var telemetryInterval string
|
||||
var reconcileDeployment bool
|
||||
var tlsOpts []func(*tls.Config)
|
||||
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
|
||||
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
|
||||
@@ -86,8 +85,6 @@ func main() {
|
||||
"Endpoint for sending telemetry data")
|
||||
flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
|
||||
"Interval between telemetry data collection (e.g. 15m, 1h)")
|
||||
flag.BoolVar(&reconcileDeployment, "reconcile-deployment", false,
|
||||
"If set, the Cozystack API server is assumed to run as a Deployment, else as a DaemonSet.")
|
||||
opts := zap.Options{
|
||||
Development: false,
|
||||
}
|
||||
@@ -196,14 +193,9 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
cozyAPIKind := "DaemonSet"
|
||||
if reconcileDeployment {
|
||||
cozyAPIKind = "Deployment"
|
||||
}
|
||||
if err = (&controller.ApplicationDefinitionReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
CozystackAPIKind: cozyAPIKind,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "ApplicationDefinitionReconciler")
|
||||
os.Exit(1)
|
||||
|
||||
@@ -50,6 +50,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
"github.com/cozystack/cozystack/internal/cozyvaluesreplicator"
|
||||
"github.com/cozystack/cozystack/internal/crdinstall"
|
||||
"github.com/cozystack/cozystack/internal/fluxinstall"
|
||||
"github.com/cozystack/cozystack/internal/operator"
|
||||
"github.com/cozystack/cozystack/internal/telemetry"
|
||||
@@ -77,6 +78,7 @@ func main() {
|
||||
var probeAddr string
|
||||
var secureMetrics bool
|
||||
var enableHTTP2 bool
|
||||
var installCRDs bool
|
||||
var installFlux bool
|
||||
var disableTelemetry bool
|
||||
var telemetryEndpoint string
|
||||
@@ -97,6 +99,7 @@ func main() {
|
||||
"If set the metrics endpoint is served securely")
|
||||
flag.BoolVar(&enableHTTP2, "enable-http2", false,
|
||||
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
|
||||
flag.BoolVar(&installCRDs, "install-crds", false, "Install Cozystack CRDs before starting reconcile loop")
|
||||
flag.BoolVar(&installFlux, "install-flux", false, "Install Flux components before starting reconcile loop")
|
||||
flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
|
||||
"Disable telemetry collection")
|
||||
@@ -134,8 +137,7 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Start the controller manager
|
||||
setupLog.Info("Starting controller manager")
|
||||
// Initialize the controller manager
|
||||
mgr, err := ctrl.NewManager(config, ctrl.Options{
|
||||
Scheme: scheme,
|
||||
Cache: cache.Options{
|
||||
@@ -177,10 +179,26 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Set up signal handler early so install phases respect SIGTERM
|
||||
mgrCtx := ctrl.SetupSignalHandler()
|
||||
|
||||
// Install Cozystack CRDs before starting reconcile loop
|
||||
if installCRDs {
|
||||
setupLog.Info("Installing Cozystack CRDs before starting reconcile loop")
|
||||
installCtx, installCancel := context.WithTimeout(mgrCtx, 2*time.Minute)
|
||||
defer installCancel()
|
||||
|
||||
if err := crdinstall.Install(installCtx, directClient, crdinstall.WriteEmbeddedManifests); err != nil {
|
||||
setupLog.Error(err, "failed to install CRDs")
|
||||
os.Exit(1)
|
||||
}
|
||||
setupLog.Info("CRD installation completed successfully")
|
||||
}
|
||||
|
||||
// Install Flux before starting reconcile loop
|
||||
if installFlux {
|
||||
setupLog.Info("Installing Flux components before starting reconcile loop")
|
||||
installCtx, installCancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
installCtx, installCancel := context.WithTimeout(mgrCtx, 5*time.Minute)
|
||||
defer installCancel()
|
||||
|
||||
// Use direct client for pre-start operations (cache is not ready yet)
|
||||
@@ -194,7 +212,7 @@ func main() {
|
||||
// Generate and install platform source resource if specified
|
||||
if platformSourceURL != "" {
|
||||
setupLog.Info("Generating platform source resource", "url", platformSourceURL, "name", platformSourceName, "ref", platformSourceRef)
|
||||
installCtx, installCancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
installCtx, installCancel := context.WithTimeout(mgrCtx, 2*time.Minute)
|
||||
defer installCancel()
|
||||
|
||||
// Use direct client for pre-start operations (cache is not ready yet)
|
||||
@@ -276,7 +294,6 @@ func main() {
|
||||
}
|
||||
|
||||
setupLog.Info("Starting controller manager")
|
||||
mgrCtx := ctrl.SetupSignalHandler()
|
||||
if err := mgr.Start(mgrCtx); err != nil {
|
||||
setupLog.Error(err, "problem running manager")
|
||||
os.Exit(1)
|
||||
|
||||
1541
dashboards/nats/nats-jetstream.json
Normal file
1541
dashboards/nats/nats-jetstream.json
Normal file
File diff suppressed because it is too large
Load Diff
1463
dashboards/nats/nats-server.json
Normal file
1463
dashboards/nats/nats-server.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -27,7 +27,7 @@ git commit --signoff -m "[component] Brief description of changes"
|
||||
|
||||
**Component prefixes:**
|
||||
- System: `[dashboard]`, `[platform]`, `[cilium]`, `[kube-ovn]`, `[linstor]`, `[fluxcd]`, `[cluster-api]`
|
||||
- Apps: `[postgres]`, `[mysql]`, `[redis]`, `[kafka]`, `[clickhouse]`, `[virtual-machine]`, `[kubernetes]`
|
||||
- Apps: `[postgres]`, `[mariadb]`, `[redis]`, `[kafka]`, `[clickhouse]`, `[virtual-machine]`, `[kubernetes]`
|
||||
- Other: `[tests]`, `[ci]`, `[docs]`, `[maintenance]`
|
||||
|
||||
**Examples:**
|
||||
|
||||
144
docs/changelogs/v1.0.0-beta.3.md
Normal file
144
docs/changelogs/v1.0.0-beta.3.md
Normal file
@@ -0,0 +1,144 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.3
|
||||
-->
|
||||
|
||||
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
|
||||
|
||||
## Major Features and Improvements
|
||||
|
||||
### New Applications
|
||||
|
||||
* **[qdrant] Add Qdrant vector database application**: Added Qdrant as a new managed application, providing a high-performance vector database for AI and machine learning workloads. Supports single replica or clustered mode, persistent storage, resource presets, API key authentication, and optional external LoadBalancer access ([**@lexfrei**](https://github.com/lexfrei) in #1987).
|
||||
|
||||
### System Components
|
||||
|
||||
* **[system] Add cluster-autoscaler package for Hetzner and Azure**: Added cluster-autoscaler system package with support for multiple cloud providers (Hetzner and Azure) to automatically scale management cluster nodes. Includes comprehensive documentation for Hetzner setup with Talos Linux, vSwitch configuration, and Kilo mesh networking integration ([**@kvaps**](https://github.com/kvaps) in #1964).
|
||||
|
||||
* **[system] Add clustersecret-operator package**: Added clustersecret-operator system package for managing secrets across multiple namespaces in Kubernetes clusters ([**@sircthulhu**](https://github.com/sircthulhu) in #2025).
|
||||
|
||||
### Networking
|
||||
|
||||
* **[kilo] Update to v0.7.0 and add configurable MTU**: Updated Kilo WireGuard mesh networking to v0.7.0 from cozystack fork with pre-built images. Added configurable MTU parameter (default: auto) for WireGuard interface, allowing automatic MTU detection or manual override ([**@kvaps**](https://github.com/kvaps) in #2003).
|
||||
|
||||
* **[local-ccm] Add node-lifecycle-controller component**: Added optional node-lifecycle-controller to local-ccm package that automatically deletes unreachable NotReady nodes from the cluster. Solves the "zombie" node problem when cluster autoscaler deletes cloud instances but node objects remain in Kubernetes. Supports configurable node selectors, protected labels, and HA deployment with leader election ([**@IvanHunters**](https://github.com/IvanHunters) in #1992).
|
||||
|
||||
### Virtual Machines
|
||||
|
||||
* **[vm] Add cpuModel field to specify CPU model without instanceType**: Added cpuModel field to VirtualMachine API, allowing users to specify CPU model directly without using instanceType, providing more granular control over VM CPU configuration ([**@sircthulhu**](https://github.com/sircthulhu) in #2007).
|
||||
|
||||
* **[vm] Allow switching between instancetype and custom resources**: Implemented atomic upgrade hook that allows switching between instanceType-based and custom resource-based VM configuration, providing more flexibility in VM resource management ([**@sircthulhu**](https://github.com/sircthulhu) in #2008).
|
||||
|
||||
* **[vm] Migrate to runStrategy instead of running**: Migrated VirtualMachine API from deprecated `running` field to `runStrategy` field, following KubeVirt upstream best practices ([**@sircthulhu**](https://github.com/sircthulhu) in #2004).
|
||||
|
||||
### Backups
|
||||
|
||||
* **[backups] Add comprehensive backup and restore functionality**: Major update to backup system including BackupClass for Velero, virtual machine backup strategies, RestoreJob resource with end-to-end restore workflows, Velero integration with polling and status tracking, and enhanced backup plans UI with simplified Plan/BackupJob API ([**@androndo**](https://github.com/androndo) in #1967, [**@lllamnyp**](https://github.com/lllamnyp) in #1968).
|
||||
|
||||
* **[backups] Add kubevirt plugin to velero**: Added KubeVirt plugin to Velero for proper virtual machine backup support, enabling consistent snapshots of VM state and data ([**@lllamnyp**](https://github.com/lllamnyp) in #2017).
|
||||
|
||||
* **[backups] Install backupstrategy controller by default**: Enabled backupstrategy controller by default to provide automatic backup scheduling and management for managed applications ([**@lllamnyp**](https://github.com/lllamnyp) in #2020).
|
||||
|
||||
* **[backups] Better selectors for VM strategy**: Improved VM backup strategy selectors for more accurate and reliable backup targeting ([**@lllamnyp**](https://github.com/lllamnyp) in #2023).
|
||||
|
||||
### Platform
|
||||
|
||||
* **[kubernetes] Auto-enable Gateway API support in cert-manager**: Added automatic Gateway API support in cert-manager for tenant Kubernetes clusters, enabling automatic certificate management for Gateway API resources ([**@kvaps**](https://github.com/kvaps) in #1997).
|
||||
|
||||
* **[tenant,rbac] Use shared clusterroles**: Refactored tenant RBAC to use shared ClusterRoles, improving maintainability and consistency across tenant namespaces ([**@lllamnyp**](https://github.com/lllamnyp) in #1999).
|
||||
|
||||
* **[mongodb] Unify users and databases configuration**: Simplified MongoDB user and database configuration with a more unified API structure ([**@kvaps**](https://github.com/kvaps) in #1923).
|
||||
|
||||
## Improvements
|
||||
|
||||
* **[keycloak-configure,dashboard] Enable insecure TLS verification by default**: Made SSL certificate verification configurable with insecure mode enabled by default for easier local development and testing ([**@IvanHunters**](https://github.com/IvanHunters) in #2005).
|
||||
|
||||
* **[dashboard] Add startupProbe to prevent container restarts on slow hardware**: Added startup probe to dashboard pods to prevent unnecessary container restarts on slow hardware or during high load ([**@kvaps**](https://github.com/kvaps) in #1996).
|
||||
|
||||
* **[cilium] Change cilium-operator replicas to 1**: Reduced Cilium operator replicas from 2 to 1 to decrease resource consumption in smaller deployments ([**@IvanHunters**](https://github.com/IvanHunters) in #1784).
|
||||
|
||||
* **[monitoring] Enable monitoring for core components**: Enhanced monitoring capabilities with better dashboards and metrics collection for core Cozystack components ([**@IvanHunters**](https://github.com/IvanHunters) in #1937).
|
||||
|
||||
* **[branding] Separate values for Keycloak**: Separated Keycloak branding values for better customization capabilities ([**@nbykov0**](https://github.com/nbykov0) in #1947).
|
||||
|
||||
* **[kubernetes] Use ingress-nginx nodeport service**: Changed Kubernetes managed clusters to use ingress-nginx NodePort service for improved compatibility and flexibility ([**@sircthulhu**](https://github.com/sircthulhu) in #1948).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[linstor] Extract piraeus-operator CRDs into separate package**: Fixed issue where Helm did not reliably install all CRDs from large crds.yaml files by creating dedicated piraeus-operator-crds package. This ensures the linstorsatellites.io CRD is properly installed, preventing satellite pod creation failures ([**@IvanHunters**](https://github.com/IvanHunters) in #1991).
|
||||
|
||||
* **[platform] Fix cozystack-values secret race condition**: Fixed race condition in cozystack-values secret creation that could cause platform initialization failures ([**@lllamnyp**](https://github.com/lllamnyp) in #2024).
|
||||
|
||||
* **[seaweedfs] Increase certificate duration to 10 years**: Increased SeaweedFS certificate validity from 1 year to 10 years to reduce certificate rotation overhead and prevent unexpected certificate expiration issues ([**@IvanHunters**](https://github.com/IvanHunters) in #1986).
|
||||
|
||||
* **[monitoring] Remove cozystack-controller dependency**: Fixed monitoring package to remove unnecessary dependency on cozystack-controller, allowing monitoring to be installed independently ([**@IvanHunters**](https://github.com/IvanHunters) in #1990).
|
||||
|
||||
* **[monitoring] Remove duplicate dashboards.list from extra/monitoring**: Fixed duplicate dashboards.list configuration in extra/monitoring package ([**@IvanHunters**](https://github.com/IvanHunters) in #2016).
|
||||
|
||||
* **[mongodb] Fix pre-commit check**: Fixed pre-commit linting issues in MongoDB package ([**@kvaps**](https://github.com/kvaps) in #1753).
|
||||
|
||||
* **[mongodb] Update MongoDB logo**: Updated MongoDB application logo in the dashboard to use the correct branding ([**@kvaps**](https://github.com/kvaps) in #2027).
|
||||
|
||||
* **[bootbox] Auto-create bootbox-application as dependency**: Fixed bootbox package to automatically create required bootbox-application dependency ([**@kvaps**](https://github.com/kvaps) in #1974).
|
||||
|
||||
* **[migrations] Add migration 25 for v1.0 upgrade cleanup**: Added migration script to handle cleanup during v1.0 upgrade path ([**@kvaps**](https://github.com/kvaps) in #1975).
|
||||
|
||||
* **[build] Fix platform migrations image build**: Fixed Docker image build process for platform migrations ([**@kvaps**](https://github.com/kvaps) in #1976).
|
||||
|
||||
* **[postgres-operator] Correct PromQL syntax in CNPGClusterOffline alert**: Fixed incorrect PromQL syntax in CNPGClusterOffline Prometheus alert for PostgreSQL clusters ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1981).
|
||||
|
||||
* **[coredns] Fix serviceaccount to match kubernetes bootstrap RBAC**: Fixed CoreDNS service account configuration to correctly match Kubernetes bootstrap RBAC requirements ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1958).
|
||||
|
||||
* **[dashboard] Verify JWT token**: Added JWT token verification to dashboard for improved security ([**@lllamnyp**](https://github.com/lllamnyp) in #1980).
|
||||
|
||||
* **[talm] Skip config loading for completion subcommands**: Fixed talm CLI to skip unnecessary config loading for shell completion commands ([**@kitsunoff**](https://github.com/kitsunoff) in [cozystack/talm#109](https://github.com/cozystack/talm/pull/109)).
|
||||
|
||||
## Dependencies
|
||||
|
||||
* **[kube-ovn] Update Kube-OVN to v1.15.3**: Updated Kube-OVN CNI to v1.15.3 with performance improvements and bug fixes ([**@kvaps**](https://github.com/kvaps) in #2022).
|
||||
|
||||
* **[local-ccm] Update to v0.3.0**: Updated local cloud controller manager to v0.3.0 with node-lifecycle-controller support ([**@kvaps**](https://github.com/kvaps) in #1992).
|
||||
|
||||
* **[kilo] Update to v0.7.0**: Updated Kilo to v0.7.0 from cozystack fork with improved MTU handling ([**@kvaps**](https://github.com/kvaps) in #2003).
|
||||
|
||||
## Development, Testing, and CI/CD
|
||||
|
||||
* **[ci] Use GitHub Copilot CLI for changelog generation**: Automated changelog generation using GitHub Copilot CLI to improve release process efficiency ([**@androndo**](https://github.com/androndo) in #1753).
|
||||
|
||||
* **[ci] Choose runner conditional on label**: Added conditional runner selection in CI based on PR labels for more flexible CI/CD workflows ([**@lllamnyp**](https://github.com/lllamnyp) in #1998).
|
||||
|
||||
* **[backups] Add restore jobs controller**: Added controller for managing backup restore jobs ([**@androndo**](https://github.com/androndo) in #1811).
|
||||
|
||||
* **Update CODEOWNERS**: Updated CODEOWNERS file to include new maintainers ([**@lllamnyp**](https://github.com/lllamnyp) in #1972, [**@IvanHunters**](https://github.com/IvanHunters) in #2015).
|
||||
|
||||
## Documentation
|
||||
|
||||
* **[website] Add LINSTOR disk preparation guide**: Added comprehensive documentation for preparing disks for LINSTOR storage system ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#411](https://github.com/cozystack/website/pull/411)).
|
||||
|
||||
* **[website] Add Proxmox VM migration guide**: Added detailed guide for migrating virtual machines from Proxmox to Cozystack ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#410](https://github.com/cozystack/website/pull/410)).
|
||||
|
||||
* **[website] Describe operator-based and HelmRelease-based package patterns**: Added development documentation explaining operator-based and HelmRelease-based package patterns for Cozystack ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#413](https://github.com/cozystack/website/pull/413)).
|
||||
|
||||
* **[website] Correct typo in kubeconfig reference in Kubernetes installation guide**: Fixed documentation typo in kubeconfig reference ([**@shkarface**](https://github.com/shkarface) in [cozystack/website#414](https://github.com/cozystack/website/pull/414)).
|
||||
|
||||
* **[website] Check quotas before an upgrade**: Added troubleshooting documentation for checking resource quotas before performing upgrades ([**@nbykov0**](https://github.com/nbykov0) in [cozystack/website#405](https://github.com/cozystack/website/pull/405)).
|
||||
|
||||
---
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to thank all contributors who made this release possible:
|
||||
|
||||
* [**@IvanHunters**](https://github.com/IvanHunters)
|
||||
* [**@androndo**](https://github.com/androndo)
|
||||
* [**@kitsunoff**](https://github.com/kitsunoff)
|
||||
* [**@kvaps**](https://github.com/kvaps)
|
||||
* [**@lexfrei**](https://github.com/lexfrei)
|
||||
* [**@lllamnyp**](https://github.com/lllamnyp)
|
||||
* [**@mattia-eleuteri**](https://github.com/mattia-eleuteri)
|
||||
* [**@nbykov0**](https://github.com/nbykov0)
|
||||
* [**@shkarface**](https://github.com/shkarface)
|
||||
* [**@sircthulhu**](https://github.com/sircthulhu)
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v1.0.0-beta.2...v1.0.0-beta.3](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.2...v1.0.0-beta.3)
|
||||
96
docs/changelogs/v1.0.0-beta.4.md
Normal file
96
docs/changelogs/v1.0.0-beta.4.md
Normal file
@@ -0,0 +1,96 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.4
|
||||
-->
|
||||
|
||||
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
|
||||
|
||||
## Major Features and Improvements
|
||||
|
||||
### Virtual Machines
|
||||
|
||||
* **[vm-instance] Complete migration from virtual-machine to vm-disk and vm-instance**: Completed the architectural redesign of virtual machine management by fully migrating from the legacy `virtual-machine` application to the new `vm-disk` and `vm-instance` applications. This includes automatic migration scripts (migration 28) that convert existing virtual machines, handle CDI webhook configurations, and update cloud-init references. The new architecture provides better separation of concerns between disk management and VM lifecycle, enabling more flexible VM configuration and improved resource management ([**@kvaps**](https://github.com/kvaps) in #2040).
|
||||
|
||||
* **[vm-instance] Port advanced VM features**: Ported critical VM features from the legacy virtual-machine application including cpuModel field for direct CPU model specification, support for switching between instanceType and custom resource configurations, and migration from deprecated `running` field to `runStrategy` field following KubeVirt best practices ([**@kvaps**](https://github.com/kvaps) in #2040).
|
||||
|
||||
### Storage and CSI
|
||||
|
||||
* **[kubevirt-csi-driver] Add RWX Filesystem (NFS) support**: Added Read-Write-Many (RWX) filesystem support to kubevirt-csi-driver, enabling multiple pods to mount the same persistent volume simultaneously via NFS. This provides native NFS support for shared storage use cases without requiring external NFS provisioners, with automatic NFS server deployment per PVC and seamless integration with KubeVirt's storage layer ([**@kvaps**](https://github.com/kvaps) in #2042).
|
||||
|
||||
### Platform and Infrastructure
|
||||
|
||||
* **[cozystack-api] Switch from DaemonSet to Deployment**: Migrated cozystack-api from DaemonSet to Deployment with PreferClose topology spread constraints, improving resource efficiency while maintaining high availability. The Deployment approach reduces resource consumption compared to running API pods on every node, while topology spreading ensures resilient pod placement across the cluster ([**@kvaps**](https://github.com/kvaps) in #2041, #2048).
|
||||
|
||||
* **[linstor] Move CRDs installation to dedicated chart**: Refactored LINSTOR CRDs installation by moving them to a dedicated `piraeus-operator-crds` chart, solving Helm's limitation with large CRD files that could cause unreliable installations. This ensures all LINSTOR CRDs (including linstorsatellites.io) are properly installed before the operator starts, preventing satellite pod creation failures. Includes automatic migration script to reassign existing CRDs to the new chart ([**@kvaps**](https://github.com/kvaps) in #2036).
|
||||
|
||||
* **[installer] Unify operator templates**: Merged separate operator templates into a single variant-based template, simplifying the installation process and reducing configuration duplication. The new template supports different deployment variants (Talos, non-Talos) through a unified configuration approach ([**@kvaps**](https://github.com/kvaps) in #2034).
|
||||
|
||||
### Applications
|
||||
|
||||
* **[mariadb] Rename mysql application to mariadb**: Renamed the MySQL application to MariaDB to accurately reflect the underlying database engine being used. Includes automatic migration script (migration 27) that handles resource renaming and ensures seamless upgrade path for existing MySQL deployments. All resources, including databases, users, backups, and configurations, are automatically migrated to use the mariadb naming ([**@kvaps**](https://github.com/kvaps) in #2026).
|
||||
|
||||
* **[ferretdb] Remove FerretDB application**: Removed the FerretDB application from the catalog as it has been superseded by native MongoDB support with improved performance and features ([**@kvaps**](https://github.com/kvaps) in #2028).
|
||||
|
||||
## Improvements
|
||||
|
||||
* **[rbac] Use hierarchical naming scheme**: Refactored RBAC configuration to use hierarchical naming scheme for cluster roles and role bindings, improving organization and maintainability of permission structures across the platform ([**@lllamnyp**](https://github.com/lllamnyp) in #2019).
|
||||
|
||||
* **[backups] Create RBAC for backup resources**: Added comprehensive RBAC configuration for backup resources, enabling proper permission management for backup operations and restore jobs across different user roles ([**@lllamnyp**](https://github.com/lllamnyp) in #2018).
|
||||
|
||||
* **[etcd-operator] Add vertical-pod-autoscaler dependency**: Added vertical-pod-autoscaler as a dependency to etcd-operator package, ensuring proper resource scaling and optimization for etcd clusters ([**@sircthulhu**](https://github.com/sircthulhu) in #2047).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[cozystack-operator] Preserve existing suspend field in package reconciler**: Fixed package reconciler to properly preserve the existing suspend field state during reconciliation, preventing unintended resumption of suspended packages ([**@sircthulhu**](https://github.com/sircthulhu) in #2043).
|
||||
|
||||
* **[cozystack-operator] Fix namespace privileged flag resolution**: Fixed operator to correctly resolve namespace privileged flag by checking all Packages in the namespace, not just the first one. This ensures namespaces are properly marked as privileged when any package requires elevated permissions ([**@kvaps**](https://github.com/kvaps) in #2046).
|
||||
|
||||
* **[cozystack-operator] Fix namespace reconciliation field ownership**: Fixed Server-Side Apply (SSA) field ownership conflicts by using per-Package field owner for namespace reconciliation, preventing conflicts when multiple packages reconcile the same namespace ([**@kvaps**](https://github.com/kvaps) in #2046).
|
||||
|
||||
* **[platform] Clean up Helm secrets for removed releases**: Added cleanup logic to migration 23 to remove orphaned Helm secrets from removed -rd releases, preventing secret accumulation and reducing cluster resource usage ([**@kvaps**](https://github.com/kvaps) in #2035).
|
||||
|
||||
* **[monitoring] Fix YAML parse error in vmagent template**: Fixed YAML parsing error in monitoring-agents vmagent template that could cause monitoring stack deployment failures ([**@kvaps**](https://github.com/kvaps) in #2037).
|
||||
|
||||
* **[talm] Fix metadata.id type casting in physical_links_info**: Fixed Prometheus query in physical_links_info chart to properly cast metadata.id to string for regexMatch operations, preventing query failures with numeric interface IDs ([**@kvaps**](https://github.com/kvaps) in cozystack/talm#110).
|
||||
|
||||
## Dependencies
|
||||
|
||||
* **[kilo] Update to v0.7.1**: Updated Kilo WireGuard mesh networking to v0.7.1 with bug fixes and improvements ([**@kvaps**](https://github.com/kvaps) in #2049).
|
||||
|
||||
## Development, Testing, and CI/CD
|
||||
|
||||
* **[ci] Improve cozyreport functionality**: Enhanced cozyreport tool with improved reporting capabilities for CI/CD pipelines, providing better visibility into test results and build status ([**@lllamnyp**](https://github.com/lllamnyp) in #2032).
|
||||
|
||||
* **[e2e] Increase HelmRelease readiness timeout for kubernetes test**: Increased HelmRelease readiness timeout in Kubernetes end-to-end tests to prevent false failures on slower hardware or during high load conditions, specifically targeting ingress-nginx component which may take longer to become ready ([**@lexfrei**](https://github.com/lexfrei) in #2033).
|
||||
|
||||
## Documentation
|
||||
|
||||
* **[website] Add documentation versioning**: Implemented comprehensive documentation versioning system with separate v0 and v1 documentation trees, version selector in the UI, proper URL redirects for unversioned docs, and improved navigation for users working with different Cozystack versions ([**@IvanStukov**](https://github.com/IvanStukov) in cozystack/website#415).
|
||||
|
||||
* **[website] Describe upgrade to v1.0**: Added detailed upgrade instructions for migrating from v0.x to v1.0, including prerequisites, upgrade steps, and troubleshooting guidance ([**@nbykov0**](https://github.com/nbykov0) in cozystack/website@21bbe84).
|
||||
|
||||
* **[website] Update support documentation**: Updated support documentation with current contact information and support channels ([**@xrmtech-isk**](https://github.com/xrmtech-isk) in cozystack/website#420).
|
||||
|
||||
---
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to thank all contributors who made this release possible:
|
||||
|
||||
* [**@IvanStukov**](https://github.com/IvanStukov)
|
||||
* [**@kvaps**](https://github.com/kvaps)
|
||||
* [**@lexfrei**](https://github.com/lexfrei)
|
||||
* [**@lllamnyp**](https://github.com/lllamnyp)
|
||||
* [**@nbykov0**](https://github.com/nbykov0)
|
||||
* [**@sircthulhu**](https://github.com/sircthulhu)
|
||||
* [**@xrmtech-isk**](https://github.com/xrmtech-isk)
|
||||
|
||||
### New Contributors
|
||||
|
||||
We're excited to welcome our first-time contributors:
|
||||
|
||||
* [**@IvanStukov**](https://github.com/IvanStukov) - First contribution!
|
||||
* [**@xrmtech-isk**](https://github.com/xrmtech-isk) - First contribution!
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v1.0.0-beta.3...v1.0.0-beta.4](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.3...v1.0.0-beta.4)
|
||||
36
docs/changelogs/v1.0.0-beta.5.md
Normal file
36
docs/changelogs/v1.0.0-beta.5.md
Normal file
@@ -0,0 +1,36 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.5
|
||||
-->
|
||||
|
||||
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
|
||||
|
||||
## Features and Improvements
|
||||
|
||||
* **[installer] Add variant-aware templates for generic Kubernetes support**: Extended the installer chart to support generic and hosted Kubernetes deployments via the existing `cozystackOperator.variant` parameter. When using `variant=generic`, the installer now renders separate templates for the Cozystack operator, skipping Talos-specific components. This enables users to deploy Cozystack on standard Kubernetes distributions and hosted Kubernetes services, expanding platform compatibility beyond Talos Linux ([**@lexfrei**](https://github.com/lexfrei) in #2010).
|
||||
|
||||
* **[kilo] Add Cilium compatibility variant**: Added a new `cilium` variant to the kilo PackageSource that deploys kilo with the `--compatibility=cilium` flag. This enables Cilium-aware IPIP encapsulation where the outer packet IP matches the inner packet source, allowing Cilium's network policies to function correctly with kilo's WireGuard mesh networking. Users can now run kilo alongside Cilium CNI while maintaining full network policy enforcement capabilities ([**@kvaps**](https://github.com/kvaps) in #2055).
|
||||
|
||||
* **[cluster-autoscaler] Enable enforce-node-group-min-size by default**: Enabled the `enforce-node-group-min-size` option for the system cluster-autoscaler chart. This ensures node groups are always scaled up to their configured minimum size, even when current workload demands are lower, preventing unexpected scale-down below minimum thresholds and improving cluster stability for production workloads ([**@kvaps**](https://github.com/kvaps) in #2050).
|
||||
|
||||
* **[dashboard] Upgrade dashboard to version 1.4.0**: Updated the Cozystack dashboard to version 1.4.0 with new features and improvements for better user experience and cluster management capabilities ([**@sircthulhu**](https://github.com/sircthulhu) in #2051).
|
||||
|
||||
## Breaking Changes & Upgrade Notes
|
||||
|
||||
* **[vpc] Migrate subnets definition from map to array format**: Migrated VPC subnets definition from map format (`map[string]Subnet`) to array format (`[]Subnet`) with an explicit `name` field. This aligns VPC subnet definitions with the vm-instance `networks` field pattern and provides more intuitive configuration. Existing VPC deployments are automatically migrated via migration 30, which converts the subnet map to an array while preserving all existing subnet configurations and network connectivity ([**@kvaps**](https://github.com/kvaps) in #2052).
|
||||
|
||||
## Dependencies
|
||||
|
||||
* **[kilo] Update to v0.8.0**: Updated Kilo WireGuard mesh networking to v0.8.0 with performance improvements, bug fixes, and new compatibility features ([**@kvaps**](https://github.com/kvaps) in #2053).
|
||||
|
||||
* **[talm] Skip config loading for __complete command**: Fixed CLI completion behavior by skipping config loading for the `__complete` command, preventing errors during shell completion when configuration files are not available or misconfigured ([**@kitsunoff**](https://github.com/kitsunoff) in cozystack/talm#109).
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to thank all contributors who made this release possible:
|
||||
|
||||
* [**@kitsunoff**](https://github.com/kitsunoff)
|
||||
* [**@kvaps**](https://github.com/kvaps)
|
||||
* [**@lexfrei**](https://github.com/lexfrei)
|
||||
* [**@sircthulhu**](https://github.com/sircthulhu)
|
||||
|
||||
**Full Changelog**: [v1.0.0-beta.4...v1.0.0-beta.5](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.4...v1.0.0-beta.5)
|
||||
46
docs/changelogs/v1.0.0-beta.6.md
Normal file
46
docs/changelogs/v1.0.0-beta.6.md
Normal file
@@ -0,0 +1,46 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.6
|
||||
-->
|
||||
|
||||
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
|
||||
|
||||
## Features and Improvements
|
||||
|
||||
* **[platform] Add cilium-kilo networking variant**: Added a new `cilium-kilo` networking variant that combines Cilium CNI with Kilo WireGuard mesh overlay. This variant enables `enable-ipip-termination` in Cilium for proper IPIP packet handling and deploys Kilo with `--compatibility=cilium` flag. Users can now select `cilium-kilo` as their networking variant during platform setup, simplifying the multi-location WireGuard setup compared to manually combining Cilium and standalone Kilo ([**@kvaps**](https://github.com/kvaps) in #2064).
|
||||
|
||||
* **[nats] Add monitoring**: Added Grafana dashboards for NATS JetStream and server metrics monitoring, along with Prometheus monitoring support with TLS-aware endpoint configuration. Includes updated image customization options (digest and full image name) and component version upgrades for the NATS exporter and utilities. Users now have full observability into NATS message broker performance and health ([**@klinch0**](https://github.com/klinch0) in #1381).
|
||||
|
||||
* **[platform] Add DNS-1035 validation for Application names**: Added dynamic DNS-1035 label validation for Application names in the Cozystack API, using `IsDNS1035Label` from `k8s.io/apimachinery`. Validation is performed at creation time and accounts for the root host length to prevent names that would exceed Kubernetes resource naming limits. This prevents creation of resources with invalid names that would fail downstream Kubernetes resource creation ([**@lexfrei**](https://github.com/lexfrei) in #1771).
|
||||
|
||||
* **[operator] Add automatic CRD installation at startup**: Added `--install-crds` flag to the Cozystack operator that installs embedded CRD manifests at startup, ensuring CRDs exist before the operator begins reconciliation. CRD manifests are now embedded in the operator binary and verified for consistency with the Helm `crds/` directory via a new CI Makefile check. This eliminates ordering issues during initial cluster setup where CRDs might not yet be present ([**@lexfrei**](https://github.com/lexfrei) in #2060).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[platform] Adopt tenant-root into cozystack-basics during migration**: Added migration 31 to adopt existing `tenant-root` Namespace and HelmRelease into the `cozystack-basics` Helm release when upgrading from v0.41.x to v1.0. Previously these resources were applied via `kubectl apply` with no Helm release tracking, causing Helm to treat them as foreign resources and potentially delete them during reconciliation. This migration ensures a safe upgrade path by annotating and labeling these resources for Helm adoption ([**@kvaps**](https://github.com/kvaps) in #2065).
|
||||
|
||||
* **[platform] Preserve tenant-root HelmRelease during migration**: Fixed a data-loss risk during migration from v0.41.x to v1.0.0-beta where the `tenant-root` HelmRelease (and the namespace it manages) could be deleted, causing tenant service outages. Added safety annotation to the HelmRelease and lookup logic to preserve current parameters during migration, preventing unwanted deletion of tenant-root resources ([**@sircthulhu**](https://github.com/sircthulhu) in #2063).
|
||||
|
||||
* **[codegen] Add gen_client to update-codegen.sh and regenerate applyconfiguration**: Fixed a build error in `pkg/generated/applyconfiguration/utils.go` caused by a reference to `testing.TypeConverter` which was removed in client-go v0.34.1. The root cause was that `hack/update-codegen.sh` never called `gen_client`, leaving the generated applyconfiguration code stale. Running the full code generation now produces a consistent and compilable codebase ([**@lexfrei**](https://github.com/lexfrei) in #2061).
|
||||
|
||||
* **[e2e] Make kubernetes test retries effective by cleaning up stale resources**: Fixed E2E test retries for the Kubernetes tenant test by adding pre-creation cleanup of backend deployment/service and NFS pod/PVC in `run-kubernetes.sh`. Previously, retries would fail immediately because stale resources from a failed attempt blocked re-creation. Also increased the tenant deployment wait timeout from 90s to 300s to handle CI resource pressure ([**@lexfrei**](https://github.com/lexfrei) in #2062).
|
||||
|
||||
## Development, Testing, and CI/CD
|
||||
|
||||
* **[e2e] Use helm install instead of kubectl apply for cozystack installation**: Replaced the pre-rendered static YAML application flow (`kubectl apply`) with direct `helm upgrade --install` of the `packages/core/installer` chart in E2E tests. Removed the CRD/operator artifact upload/download steps from the CI workflow, simplifying the pipeline. The chart with correct values is already present in the sandbox via workspace copy and `pr.patch` ([**@lexfrei**](https://github.com/lexfrei) in #2060).
|
||||
|
||||
## Documentation
|
||||
|
||||
* **[website] Improve Azure autoscaling troubleshooting guide**: Enhanced the Azure autoscaling troubleshooting documentation with serial console instructions for debugging VMSS worker nodes, a troubleshooting section for nodes stuck in maintenance mode due to invalid or missing machine config, `az vmss update --custom-data` instructions for updating machine config, and a warning that Azure does not support reading back `customData` ([**@kvaps**](https://github.com/kvaps) in cozystack/website#424).
|
||||
|
||||
* **[website] Update multi-location documentation for cilium-kilo variant**: Updated multi-location networking documentation to reflect the new integrated `cilium-kilo` variant selection during platform setup, replacing the previous manual Kilo installation and Cilium configuration steps. Added explanation of `enable-ipip-termination` and updated the troubleshooting section ([**@kvaps**](https://github.com/kvaps) in cozystack/website@02d63f0).
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to thank all contributors who made this release possible:
|
||||
|
||||
* [**@klinch0**](https://github.com/klinch0)
|
||||
* [**@kvaps**](https://github.com/kvaps)
|
||||
* [**@lexfrei**](https://github.com/lexfrei)
|
||||
* [**@sircthulhu**](https://github.com/sircthulhu)
|
||||
|
||||
**Full Changelog**: [v1.0.0-beta.5...v1.0.0-beta.6](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.5...v1.0.0-beta.6)
|
||||
356
docs/proposals/affinity-class.md
Normal file
356
docs/proposals/affinity-class.md
Normal file
@@ -0,0 +1,356 @@
|
||||
# AffinityClass: Named Placement Classes for CozyStack Applications (Draft)
|
||||
|
||||
## Concept
|
||||
|
||||
Similar to StorageClass in Kubernetes, a new resource **AffinityClass** is introduced — a named abstraction over scheduling constraints. When creating an Application, the user selects an AffinityClass by name without knowing the details of the cluster topology.
|
||||
|
||||
```
|
||||
StorageClass → "which disk" → PV provisioning
|
||||
AffinityClass → "where to place" → Pod scheduling
|
||||
```
|
||||
|
||||
## Design
|
||||
|
||||
### 1. AffinityClass CRD
|
||||
|
||||
A cluster-scoped resource created by the platform administrator:
|
||||
|
||||
```yaml
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: AffinityClass
|
||||
metadata:
|
||||
name: dc1
|
||||
spec:
|
||||
# nodeSelector that MUST be present on every pod of the application.
|
||||
# Used for validation by the lineage webhook.
|
||||
nodeSelector:
|
||||
topology.kubernetes.io/zone: dc1
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: AffinityClass
|
||||
metadata:
|
||||
name: dc2
|
||||
spec:
|
||||
nodeSelector:
|
||||
topology.kubernetes.io/zone: dc2
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: AffinityClass
|
||||
metadata:
|
||||
name: gpu
|
||||
spec:
|
||||
nodeSelector:
|
||||
node.kubernetes.io/gpu: "true"
|
||||
```
|
||||
|
||||
An AffinityClass contains a `nodeSelector` — a set of key=value pairs that must be present in `pod.spec.nodeSelector` on every pod of the application. This is a contract: the chart is responsible for setting these selectors; the webhook is responsible for verifying them.
|
||||
|
||||
### 2. Tenant: Restricting Available Classes
|
||||
|
||||
Tenant gets `allowedAffinityClasses` and `defaultAffinityClass` fields:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: acme
|
||||
namespace: tenant-root
|
||||
spec:
|
||||
defaultAffinityClass: dc1 # default class for applications
|
||||
allowedAffinityClasses: # which classes are allowed
|
||||
- dc1
|
||||
- dc2
|
||||
etcd: false
|
||||
ingress: true
|
||||
monitoring: false
|
||||
```
|
||||
|
||||
These values are propagated to the `cozystack-values` Secret in the child namespace:
|
||||
|
||||
```yaml
|
||||
# Secret cozystack-values in namespace tenant-acme
|
||||
stringData:
|
||||
values.yaml: |
|
||||
_cluster:
|
||||
# ... existing cluster config
|
||||
_namespace:
|
||||
# ... existing namespace config
|
||||
defaultAffinityClass: dc1
|
||||
allowedAffinityClasses:
|
||||
- dc1
|
||||
- dc2
|
||||
```
|
||||
|
||||
### 3. Application: Selecting a Class
|
||||
|
||||
Each application can specify an `affinityClass`. If not specified, the `defaultAffinityClass` from the tenant is used:
|
||||
|
||||
```yaml
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Postgres
|
||||
metadata:
|
||||
name: main-db
|
||||
namespace: tenant-acme
|
||||
spec:
|
||||
affinityClass: dc1 # explicit selection
|
||||
replicas: 3
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Redis
|
||||
metadata:
|
||||
name: cache
|
||||
namespace: tenant-acme
|
||||
spec:
|
||||
# affinityClass not specified → uses tenant's defaultAffinityClass (dc1)
|
||||
replicas: 2
|
||||
```
|
||||
|
||||
### 4. How affinityClass Reaches the HelmRelease
|
||||
|
||||
When creating an Application, the API server (`pkg/registry/apps/application/rest.go`):
|
||||
|
||||
1. Extracts `affinityClass` from `spec` (or uses the default from `cozystack-values`)
|
||||
2. Records `affinityClass` as a **label on the HelmRelease**:
|
||||
```
|
||||
apps.cozystack.io/affinity-class: dc1
|
||||
```
|
||||
3. Resolves AffinityClass to `nodeSelector` and passes it into HelmRelease values as `_scheduling`:
|
||||
```yaml
|
||||
_scheduling:
|
||||
affinityClass: dc1
|
||||
nodeSelector:
|
||||
topology.kubernetes.io/zone: dc1
|
||||
```
|
||||
|
||||
### 5. How Charts Apply Scheduling
|
||||
|
||||
A helper is added to `cozy-lib`:
|
||||
|
||||
```yaml
|
||||
{{- define "cozy-lib.scheduling.nodeSelector" -}}
|
||||
{{- if .Values._scheduling }}
|
||||
{{- if .Values._scheduling.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- .Values._scheduling.nodeSelector | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
```
|
||||
|
||||
Each app chart uses the helper when rendering Pod/StatefulSet/Deployment specs:
|
||||
|
||||
```yaml
|
||||
# packages/apps/postgres/templates/db.yaml
|
||||
spec:
|
||||
instances: {{ .Values.replicas }}
|
||||
{{- include "cozy-lib.scheduling.nodeSelector" . | nindent 2 }}
|
||||
```
|
||||
|
||||
```yaml
|
||||
# packages/apps/redis/templates/redis.yaml
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
template:
|
||||
spec:
|
||||
{{- include "cozy-lib.scheduling.nodeSelector" . | nindent 6 }}
|
||||
```
|
||||
|
||||
Charts **must** apply `_scheduling.nodeSelector`. If they don't, pods will be rejected by the webhook.
|
||||
|
||||
---
|
||||
|
||||
## Validation via Lineage Webhook
|
||||
|
||||
### Why Validation, Not Mutation
|
||||
|
||||
Mutation (injecting nodeSelector into a pod) creates problems:
|
||||
- Requires merging with existing pod nodeSelector/affinity — complex logic with edge cases
|
||||
- Operators (CNPG, Strimzi) may overwrite nodeSelector on pod restart
|
||||
- Hidden behavior: pod is created with one spec but actually runs with another
|
||||
|
||||
Validation is simpler and more reliable:
|
||||
- Webhook checks: "does this pod **have** the required nodeSelector?"
|
||||
- If not, the pod is **rejected** with a clear error message
|
||||
- The chart and operator are responsible for setting the correct spec
|
||||
|
||||
### What Already Exists in the Lineage Webhook
|
||||
|
||||
The lineage webhook (`internal/lineagecontrollerwebhook/webhook.go`) on every Pod creation:
|
||||
|
||||
1. Decodes the Pod
|
||||
2. Walks the ownership graph (`lineage.WalkOwnershipGraph`) — finds the **owning HelmRelease**
|
||||
3. Extracts labels from the HelmRelease: `apps.cozystack.io/application.kind`, `.group`, `.name`
|
||||
4. Applies these labels to the Pod
|
||||
|
||||
**Key point:** the webhook already knows which HelmRelease owns each Pod.
|
||||
|
||||
### What Is Added
|
||||
|
||||
After computing lineage labels, a validation step is added:
|
||||
|
||||
```
|
||||
Handle(pod):
|
||||
1. [existing] computeLabels(pod) → finds owning HelmRelease
|
||||
2. [existing] applyLabels(pod, labels) → mutates labels
|
||||
3. [NEW] validateAffinity(pod, hr) → checks nodeSelector
|
||||
4. Return patch or Denied
|
||||
```
|
||||
|
||||
The `validateAffinity` logic:
|
||||
|
||||
```go
|
||||
func (h *LineageControllerWebhook) validateAffinity(
|
||||
ctx context.Context,
|
||||
pod *unstructured.Unstructured,
|
||||
hr *helmv2.HelmRelease,
|
||||
) *admission.Response {
|
||||
// 1. Extract affinityClass from HelmRelease label
|
||||
affinityClassName, ok := hr.Labels["apps.cozystack.io/affinity-class"]
|
||||
if !ok {
|
||||
return nil // no affinityClass — no validation needed
|
||||
}
|
||||
|
||||
// 2. Look up AffinityClass from cache
|
||||
affinityClass, ok := h.affinityClassMap[affinityClassName]
|
||||
if !ok {
|
||||
resp := admission.Denied(fmt.Sprintf(
|
||||
"AffinityClass %q not found", affinityClassName))
|
||||
return &resp
|
||||
}
|
||||
|
||||
// 3. Check pod's nodeSelector
|
||||
podNodeSelector := extractNodeSelector(pod) // from pod.spec.nodeSelector
|
||||
for key, expected := range affinityClass.Spec.NodeSelector {
|
||||
actual, exists := podNodeSelector[key]
|
||||
if !exists || actual != expected {
|
||||
resp := admission.Denied(fmt.Sprintf(
|
||||
"pod %s/%s belongs to application with AffinityClass %q "+
|
||||
"but missing required nodeSelector %s=%s",
|
||||
pod.GetNamespace(), pod.GetName(),
|
||||
affinityClassName, key, expected))
|
||||
return &resp
|
||||
}
|
||||
}
|
||||
|
||||
return nil // validation passed
|
||||
}
|
||||
```
|
||||
|
||||
### AffinityClass Caching
|
||||
|
||||
The lineage webhook controller already caches ApplicationDefinitions (`runtimeConfig.appCRDMap`). An AffinityClass cache is added in the same way:
|
||||
|
||||
```go
|
||||
type runtimeConfig struct {
|
||||
appCRDMap map[appRef]*cozyv1alpha1.ApplicationDefinition
|
||||
affinityClassMap map[string]*cozyv1alpha1.AffinityClass // NEW
|
||||
}
|
||||
```
|
||||
|
||||
The controller adds a watch on AffinityClass:
|
||||
|
||||
```go
|
||||
func (c *LineageControllerWebhook) SetupWithManagerAsController(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&cozyv1alpha1.ApplicationDefinition{}).
|
||||
Watches(&cozyv1alpha1.AffinityClass{}, &handler.EnqueueRequestForObject{}).
|
||||
Complete(c)
|
||||
}
|
||||
```
|
||||
|
||||
When an AffinityClass changes, the cache is rebuilt.
|
||||
|
||||
---
|
||||
|
||||
## End-to-End Flow
|
||||
|
||||
```
|
||||
1. Admin creates AffinityClass "dc1" (nodeSelector: zone=dc1)
|
||||
|
||||
2. Admin creates Tenant "acme" (defaultAffinityClass: dc1, allowed: [dc1, dc2])
|
||||
→ namespace tenant-acme
|
||||
→ cozystack-values Secret with defaultAffinityClass
|
||||
|
||||
3. User creates Postgres "main-db" (affinityClass: dc1)
|
||||
→ API server checks: dc1 ∈ allowedAffinityClasses? ✓
|
||||
→ API server resolves AffinityClass → nodeSelector
|
||||
→ HelmRelease is created with:
|
||||
- label: apps.cozystack.io/affinity-class=dc1
|
||||
- values: _scheduling.nodeSelector.topology.kubernetes.io/zone=dc1
|
||||
|
||||
4. FluxCD deploys HelmRelease → Helm renders the chart
|
||||
→ Chart uses cozy-lib helper
|
||||
→ CNPG Cluster is created with nodeSelector: {zone: dc1}
|
||||
|
||||
5. CNPG operator creates Pod
|
||||
→ Pod has nodeSelector: {zone: dc1}
|
||||
|
||||
6. Lineage webhook intercepts the Pod:
|
||||
a. WalkOwnershipGraph → finds HelmRelease "main-db"
|
||||
b. HelmRelease label → affinityClass=dc1
|
||||
c. AffinityClass "dc1" → nodeSelector: {zone: dc1}
|
||||
d. Checks: pod.spec.nodeSelector contains zone=dc1? ✓
|
||||
e. Admits Pod (+ standard lineage labels)
|
||||
|
||||
7. Scheduler places the Pod on a node in dc1
|
||||
```
|
||||
|
||||
### Error Scenario (chart forgot to apply nodeSelector):
|
||||
|
||||
```
|
||||
5. CNPG operator creates Pod WITHOUT nodeSelector
|
||||
|
||||
6. Lineage webhook:
|
||||
d. Checks: pod.spec.nodeSelector contains zone=dc1? ✗
|
||||
e. REJECTS Pod:
|
||||
"pod main-db-1 belongs to application with AffinityClass dc1
|
||||
but missing required nodeSelector topology.kubernetes.io/zone=dc1"
|
||||
|
||||
7. Pod is not created. CNPG operator sees the error and retries.
|
||||
→ Chart developer gets a signal that the chart does not support scheduling.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Code Changes
|
||||
|
||||
### New Files
|
||||
|
||||
| File | Description |
|
||||
|------------------------------------------------------|-------------------------|
|
||||
| `api/v1alpha1/affinityclass_types.go` | AffinityClass CRD types |
|
||||
| `config/crd/bases/cozystack.io_affinityclasses.yaml` | CRD manifest |
|
||||
|
||||
### Modified Files
|
||||
|
||||
| File | Change |
|
||||
|-------------------------------------------------------|-------------------------------------------------------------------|
|
||||
| `internal/lineagecontrollerwebhook/webhook.go` | Add `validateAffinity()` to `Handle()` |
|
||||
| `internal/lineagecontrollerwebhook/config.go` | Add `affinityClassMap` to `runtimeConfig` |
|
||||
| `internal/lineagecontrollerwebhook/controller.go` | Add watch on AffinityClass |
|
||||
| `pkg/registry/apps/application/rest.go` | On Create/Update: resolve affinityClass, pass to values and label |
|
||||
| `packages/apps/tenant/values.yaml` | Add `defaultAffinityClass`, `allowedAffinityClasses` |
|
||||
| `packages/apps/tenant/templates/namespace.yaml` | Propagate to cozystack-values |
|
||||
| `packages/system/tenant-rd/cozyrds/tenant.yaml` | Extend OpenAPI schema |
|
||||
| `packages/library/cozy-lib/templates/_cozyconfig.tpl` | Add `cozy-lib.scheduling.nodeSelector` helper |
|
||||
| `packages/apps/*/templates/*.yaml` | Each app chart: add helper usage |
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. **AffinityClass outside Tenants**: Should AffinityClass work for applications outside tenant namespaces (system namespace)? Or only for tenant workloads?
|
||||
|
||||
2. **affinityClass validation on Application creation**: The API server should verify that the specified affinityClass exists and is included in the tenant's `allowedAffinityClasses`. Where should this be done — in the REST handler (`rest.go`) or in a separate validating webhook?
|
||||
|
||||
3. **Soft mode (warn vs deny)**: Is a mode needed where the webhook issues a warning instead of rejecting? This would simplify gradual adoption while not all charts support `_scheduling`.
|
||||
|
||||
4. **affinityClass inheritance**: If a child Tenant does not specify `defaultAffinityClass`, should it be inherited from the parent? The current `cozystack-values` architecture supports this inheritance natively.
|
||||
|
||||
5. **Multiple nodeSelectors**: Is OR-logic support needed (pod can be in dc1 OR dc2)? With `nodeSelector` this is impossible — AffinityClass would need to be extended to `nodeAffinity`. However, validation becomes significantly more complex.
|
||||
3
go.mod
3
go.mod
@@ -29,7 +29,7 @@ require (
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d
|
||||
sigs.k8s.io/controller-runtime v0.22.4
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -125,7 +125,6 @@ require (
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
|
||||
5
go.sum
5
go.sum
@@ -81,7 +81,6 @@ github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
||||
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -324,13 +323,9 @@ sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327U
|
||||
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
@@ -57,23 +57,23 @@ kubectl get hr -A --no-headers | awk '$4 != "True"' | \
|
||||
done
|
||||
|
||||
echo "Collecting packages..."
|
||||
kubectl get packages -A > $REPORT_DIR/kubernetes/packages.txt 2>&1
|
||||
kubectl get packages -A --no-headers | awk '$4 != "True"' | \
|
||||
while read NAMESPACE NAME _; do
|
||||
DIR=$REPORT_DIR/kubernetes/packages/$NAMESPACE/$NAME
|
||||
kubectl get packages > $REPORT_DIR/kubernetes/packages.txt 2>&1
|
||||
kubectl get packages --no-headers | awk '$3 != "True"' | \
|
||||
while read NAME _; do
|
||||
DIR=$REPORT_DIR/kubernetes/packages/$NAME
|
||||
mkdir -p $DIR
|
||||
kubectl get package -n $NAMESPACE $NAME -o yaml > $DIR/package.yaml 2>&1
|
||||
kubectl describe package -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
|
||||
kubectl get package $NAME -o yaml > $DIR/package.yaml 2>&1
|
||||
kubectl describe package $NAME > $DIR/describe.txt 2>&1
|
||||
done
|
||||
|
||||
echo "Collecting packagesources..."
|
||||
kubectl get packagesources -A > $REPORT_DIR/kubernetes/packagesources.txt 2>&1
|
||||
kubectl get packagesources -A --no-headers | awk '$4 != "True"' | \
|
||||
while read NAMESPACE NAME _; do
|
||||
DIR=$REPORT_DIR/kubernetes/packagesources/$NAMESPACE/$NAME
|
||||
kubectl get packagesources > $REPORT_DIR/kubernetes/packagesources.txt 2>&1
|
||||
kubectl get packagesources --no-headers | awk '$3 != "True"' | \
|
||||
while read NAME _; do
|
||||
DIR=$REPORT_DIR/kubernetes/packagesources/$NAME
|
||||
mkdir -p $DIR
|
||||
kubectl get packagesource -n $NAMESPACE $NAME -o yaml > $DIR/packagesource.yaml 2>&1
|
||||
kubectl describe packagesource -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
|
||||
kubectl get packagesource $NAME -o yaml > $DIR/packagesource.yaml 2>&1
|
||||
kubectl describe packagesource $NAME > $DIR/describe.txt 2>&1
|
||||
done
|
||||
|
||||
echo "Collecting pods..."
|
||||
@@ -82,7 +82,7 @@ kubectl get pod -A --no-headers | awk '$4 !~ /Running|Succeeded|Completed/' |
|
||||
while read NAMESPACE NAME _ STATE _; do
|
||||
DIR=$REPORT_DIR/kubernetes/pods/$NAMESPACE/$NAME
|
||||
mkdir -p $DIR
|
||||
CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name}' -n $NAMESPACE $NAME)
|
||||
CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name} {.spec.initContainers[*].name}' -n $NAMESPACE $NAME)
|
||||
kubectl get pod -n $NAMESPACE $NAME -o yaml > $DIR/pod.yaml 2>&1
|
||||
kubectl describe pod -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
|
||||
if [ "$STATE" != "Pending" ]; then
|
||||
|
||||
@@ -83,6 +83,8 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats
|
||||
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
|
||||
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//seaweedfs/seaweedfs.json
|
||||
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
|
||||
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//nats/nats-jetstream.json
|
||||
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//nats/nats-server.json
|
||||
EOT
|
||||
|
||||
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Create DB FerretDB" {
|
||||
name='test'
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: FerretDB
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
backup:
|
||||
destinationPath: "s3://bucket/path/to/folder/"
|
||||
enabled: false
|
||||
endpointURL: "http://minio-gateway-service:9000"
|
||||
retentionPolicy: "30d"
|
||||
s3AccessKey: "<your-access-key>"
|
||||
s3SecretKey: "<your-secret-key>"
|
||||
schedule: "0 2 * * * *"
|
||||
bootstrap:
|
||||
enabled: false
|
||||
external: false
|
||||
quorum:
|
||||
maxSyncReplicas: 0
|
||||
minSyncReplicas: 0
|
||||
replicas: 2
|
||||
resources: {}
|
||||
resourcesPreset: "micro"
|
||||
size: "10Gi"
|
||||
users:
|
||||
testuser:
|
||||
password: xai7Wepo
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr ferretdb-$name --timeout=100s --for=condition=ready
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get svc ferretdb-$name-postgres-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get svc ferretdb-$name-postgres-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get svc ferretdb-$name-postgres-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
|
||||
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints ferretdb-$name-postgres-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
# for some reason it takes longer for the read-only endpoint to be ready
|
||||
#timeout 120 sh -ec "until kubectl -n tenant-test get endpoints ferretdb-$name-postgres-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints ferretdb-$name-postgres-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test delete ferretdb.apps.cozystack.io $name
|
||||
}
|
||||
74
hack/e2e-apps/harbor.bats
Normal file
74
hack/e2e-apps/harbor.bats
Normal file
@@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Create Harbor" {
|
||||
name='test'
|
||||
release="harbor-$name"
|
||||
|
||||
# Clean up stale resources from previous failed runs
|
||||
kubectl -n tenant-test delete harbor.apps.cozystack.io $name 2>/dev/null || true
|
||||
kubectl -n tenant-test wait hr $release --timeout=60s --for=delete 2>/dev/null || true
|
||||
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Harbor
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
host: ""
|
||||
storageClass: ""
|
||||
core:
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
registry:
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
jobservice:
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
trivy:
|
||||
enabled: false
|
||||
size: 2Gi
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
database:
|
||||
size: 2Gi
|
||||
replicas: 1
|
||||
redis:
|
||||
size: 1Gi
|
||||
replicas: 1
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr $release --timeout=60s --for=condition=ready
|
||||
|
||||
# Wait for COSI to provision bucket
|
||||
kubectl -n tenant-test wait bucketclaims.objectstorage.k8s.io $release-registry \
|
||||
--timeout=300s --for=jsonpath='{.status.bucketReady}'=true
|
||||
kubectl -n tenant-test wait bucketaccesses.objectstorage.k8s.io $release-registry \
|
||||
--timeout=60s --for=jsonpath='{.status.accessGranted}'=true
|
||||
|
||||
kubectl -n tenant-test wait hr $release-system --timeout=600s --for=condition=ready || {
|
||||
echo "=== HelmRelease status ==="
|
||||
kubectl -n tenant-test get hr $release-system -o yaml 2>&1 || true
|
||||
echo "=== Pods ==="
|
||||
kubectl -n tenant-test get pods 2>&1 || true
|
||||
echo "=== Events ==="
|
||||
kubectl -n tenant-test get events --sort-by='.lastTimestamp' 2>&1 | tail -30 || true
|
||||
echo "=== ExternalArtifact ==="
|
||||
kubectl -n cozy-system get externalartifact cozystack-harbor-application-default-harbor-system -o yaml 2>&1 || true
|
||||
echo "=== BucketClaim status ==="
|
||||
kubectl -n tenant-test get bucketclaims.objectstorage.k8s.io $release-registry -o yaml 2>&1 || true
|
||||
echo "=== BucketAccess status ==="
|
||||
kubectl -n tenant-test get bucketaccesses.objectstorage.k8s.io $release-registry -o yaml 2>&1 || true
|
||||
echo "=== BucketAccess Secret ==="
|
||||
kubectl -n tenant-test get secret $release-registry-bucket -o jsonpath='{.data.BucketInfo}' 2>&1 | base64 -d 2>&1 || true
|
||||
false
|
||||
}
|
||||
kubectl -n tenant-test wait deploy $release-core --timeout=120s --for=condition=available
|
||||
kubectl -n tenant-test wait deploy $release-registry --timeout=120s --for=condition=available
|
||||
kubectl -n tenant-test wait deploy $release-portal --timeout=120s --for=condition=available
|
||||
kubectl -n tenant-test get secret $release-credentials -o jsonpath='{.data.admin-password}' | base64 --decode | grep -q '.'
|
||||
kubectl -n tenant-test get secret $release-credentials -o jsonpath='{.data.url}' | base64 --decode | grep -q 'https://'
|
||||
kubectl -n tenant-test get svc $release -o jsonpath='{.spec.ports[0].port}' | grep -q '80'
|
||||
kubectl -n tenant-test delete harbor.apps.cozystack.io $name
|
||||
}
|
||||
46
hack/e2e-apps/mariadb.bats
Normal file
46
hack/e2e-apps/mariadb.bats
Normal file
@@ -0,0 +1,46 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Create DB MariaDB" {
|
||||
name='test'
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: MariaDB
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
external: false
|
||||
size: 10Gi
|
||||
replicas: 2
|
||||
storageClass: ""
|
||||
users:
|
||||
testuser:
|
||||
maxUserConnections: 1000
|
||||
password: xai7Wepo
|
||||
databases:
|
||||
testdb:
|
||||
roles:
|
||||
admin:
|
||||
- testuser
|
||||
backup:
|
||||
enabled: false
|
||||
s3Region: us-east-1
|
||||
s3Bucket: s3.example.org/mariadb-backups
|
||||
schedule: "0 2 * * *"
|
||||
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
|
||||
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
|
||||
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
|
||||
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr mariadb-$name --timeout=30s --for=condition=ready
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get svc mariadb-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mariadb-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test wait statefulset.apps/mariadb-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get svc mariadb-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mariadb-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test wait deployment.apps/mariadb-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
|
||||
kubectl -n tenant-test delete mariadbs.apps.cozystack.io $name
|
||||
}
|
||||
@@ -13,7 +13,7 @@ spec:
|
||||
size: 10Gi
|
||||
replicas: 1
|
||||
storageClass: ""
|
||||
resourcesPreset: "nano"
|
||||
resourcesPreset: "small"
|
||||
users:
|
||||
testuser:
|
||||
password: xai7Wepo
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Create DB MySQL" {
|
||||
name='test'
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: MySQL
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
external: false
|
||||
size: 10Gi
|
||||
replicas: 2
|
||||
storageClass: ""
|
||||
users:
|
||||
testuser:
|
||||
maxUserConnections: 1000
|
||||
password: xai7Wepo
|
||||
databases:
|
||||
testdb:
|
||||
roles:
|
||||
admin:
|
||||
- testuser
|
||||
backup:
|
||||
enabled: false
|
||||
s3Region: us-east-1
|
||||
s3Bucket: s3.example.org/postgres-backups
|
||||
schedule: "0 2 * * *"
|
||||
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
|
||||
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
|
||||
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
|
||||
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
|
||||
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
|
||||
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
|
||||
kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
|
||||
}
|
||||
25
hack/e2e-apps/qdrant.bats
Executable file
25
hack/e2e-apps/qdrant.bats
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Create Qdrant" {
|
||||
name='test'
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Qdrant
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
replicas: 1
|
||||
size: 10Gi
|
||||
storageClass: ""
|
||||
resourcesPreset: "nano"
|
||||
resources: {}
|
||||
external: false
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr qdrant-$name --timeout=60s --for=condition=ready
|
||||
kubectl -n tenant-test wait hr qdrant-$name-system --timeout=120s --for=condition=ready
|
||||
kubectl -n tenant-test wait sts qdrant-$name --timeout=90s --for=jsonpath='{.status.readyReplicas}'=1
|
||||
kubectl -n tenant-test wait pvc qdrant-storage-qdrant-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
|
||||
kubectl -n tenant-test delete qdrant.apps.cozystack.io $name
|
||||
}
|
||||
@@ -56,7 +56,7 @@ spec:
|
||||
gpus: []
|
||||
instanceType: u1.medium
|
||||
maxReplicas: 10
|
||||
minReplicas: 0
|
||||
minReplicas: 2
|
||||
roles:
|
||||
- ingress-nginx
|
||||
storageClass: replicated
|
||||
@@ -80,10 +80,10 @@ EOF
|
||||
# Wait for the machine deployment to scale to 2 replicas (timeout after 1 minute)
|
||||
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
|
||||
# Get the admin kubeconfig and save it to a file
|
||||
kubectl get secret kubernetes-${test_name}-admin-kubeconfig -ojsonpath='{.data.super-admin\.conf}' -n tenant-test | base64 -d > tenantkubeconfig-${test_name}
|
||||
kubectl get secret kubernetes-${test_name}-admin-kubeconfig -ojsonpath='{.data.super-admin\.conf}' -n tenant-test | base64 -d > "tenantkubeconfig-${test_name}"
|
||||
|
||||
# Update the kubeconfig to use localhost for the API server
|
||||
yq -i ".clusters[0].cluster.server = \"https://localhost:${port}\"" tenantkubeconfig-${test_name}
|
||||
yq -i ".clusters[0].cluster.server = \"https://localhost:${port}\"" "tenantkubeconfig-${test_name}"
|
||||
|
||||
|
||||
# Set up port forwarding to the Kubernetes API server for a 200 second timeout
|
||||
@@ -98,8 +98,8 @@ EOF
|
||||
done
|
||||
'
|
||||
# Verify the nodes are ready
|
||||
kubectl --kubeconfig tenantkubeconfig-${test_name} wait node --all --timeout=2m --for=condition=Ready
|
||||
kubectl --kubeconfig tenantkubeconfig-${test_name} get nodes -o wide
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait node --all --timeout=2m --for=condition=Ready
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" get nodes -o wide
|
||||
|
||||
# Verify the kubelet version matches what we expect
|
||||
versions=$(kubectl --kubeconfig "tenantkubeconfig-${test_name}" \
|
||||
@@ -125,15 +125,21 @@ EOF
|
||||
fi
|
||||
|
||||
|
||||
kubectl --kubeconfig tenantkubeconfig-${test_name} apply -f - <<EOF
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: tenant-test
|
||||
EOF
|
||||
|
||||
# Clean up backend resources from any previous failed attempt
|
||||
kubectl delete deployment --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" \
|
||||
-n tenant-test --ignore-not-found --timeout=60s || true
|
||||
kubectl delete service --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" \
|
||||
-n tenant-test --ignore-not-found --timeout=60s || true
|
||||
|
||||
# Backend 1
|
||||
kubectl apply --kubeconfig tenantkubeconfig-${test_name} -f- <<EOF
|
||||
kubectl apply --kubeconfig "tenantkubeconfig-${test_name}" -f- <<EOF
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
@@ -165,7 +171,7 @@ spec:
|
||||
EOF
|
||||
|
||||
# LoadBalancer Service
|
||||
kubectl apply --kubeconfig tenantkubeconfig-${test_name} -f- <<EOF
|
||||
kubectl apply --kubeconfig "tenantkubeconfig-${test_name}" -f- <<EOF
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
@@ -182,7 +188,7 @@ spec:
|
||||
EOF
|
||||
|
||||
# Wait for pods readiness
|
||||
kubectl wait deployment --kubeconfig tenantkubeconfig-${test_name} ${test_name}-backend -n tenant-test --for=condition=Available --timeout=90s
|
||||
kubectl wait deployment --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" -n tenant-test --for=condition=Available --timeout=300s
|
||||
|
||||
# Wait for LoadBalancer to be provisioned (IP or hostname)
|
||||
timeout 90 sh -ec "
|
||||
@@ -193,7 +199,7 @@ EOF
|
||||
"
|
||||
|
||||
LB_ADDR=$(
|
||||
kubectl get svc --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" \
|
||||
kubectl get svc --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" \
|
||||
-n tenant-test \
|
||||
-o jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}'
|
||||
)
|
||||
@@ -215,15 +221,79 @@ fi
|
||||
fi
|
||||
|
||||
# Cleanup
|
||||
kubectl delete deployment --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" -n tenant-test
|
||||
kubectl delete service --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" -n tenant-test
|
||||
kubectl delete deployment --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" -n tenant-test
|
||||
kubectl delete service --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" -n tenant-test
|
||||
|
||||
# Clean up NFS test resources from any previous failed attempt
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pod nfs-test-pod \
|
||||
-n tenant-test --ignore-not-found --timeout=60s || true
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pvc nfs-test-pvc \
|
||||
-n tenant-test --ignore-not-found --timeout=60s || true
|
||||
|
||||
# Test RWX NFS mount in tenant cluster (uses kubevirt CSI driver with RWX support)
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nfs-test-pvc
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
storageClassName: kubevirt
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
EOF
|
||||
|
||||
# Wait for PVC to be bound
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait pvc nfs-test-pvc -n tenant-test --timeout=2m --for=jsonpath='{.status.phase}'=Bound
|
||||
|
||||
# Create Pod that writes and reads data from NFS volume
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nfs-test-pod
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
containers:
|
||||
- name: test
|
||||
image: busybox
|
||||
command: ["sh", "-c", "echo 'nfs-mount-ok' > /data/test.txt && cat /data/test.txt"]
|
||||
volumeMounts:
|
||||
- name: nfs-vol
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: nfs-vol
|
||||
persistentVolumeClaim:
|
||||
claimName: nfs-test-pvc
|
||||
restartPolicy: Never
|
||||
EOF
|
||||
|
||||
# Wait for Pod to complete successfully
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait pod nfs-test-pod -n tenant-test --timeout=5m --for=jsonpath='{.status.phase}'=Succeeded
|
||||
|
||||
# Verify NFS data integrity
|
||||
nfs_result=$(kubectl --kubeconfig "tenantkubeconfig-${test_name}" logs nfs-test-pod -n tenant-test)
|
||||
if [ "$nfs_result" != "nfs-mount-ok" ]; then
|
||||
echo "NFS mount test failed: expected 'nfs-mount-ok', got '$nfs_result'" >&2
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pod nfs-test-pod -n tenant-test --wait=false 2>/dev/null || true
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pvc nfs-test-pvc -n tenant-test --wait=false 2>/dev/null || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Cleanup NFS test resources in tenant cluster
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pod nfs-test-pod -n tenant-test --wait
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pvc nfs-test-pvc -n tenant-test
|
||||
|
||||
# Wait for all machine deployment replicas to be ready (timeout after 10 minutes)
|
||||
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
|
||||
|
||||
for component in cilium coredns csi ingress-nginx vsnap-crd; do
|
||||
for component in cilium coredns csi vsnap-crd; do
|
||||
kubectl wait hr kubernetes-${test_name}-${component} -n tenant-test --timeout=1m --for=condition=ready
|
||||
done
|
||||
kubectl wait hr kubernetes-${test_name}-ingress-nginx -n tenant-test --timeout=5m --for=condition=ready
|
||||
|
||||
# Clean up by deleting the Kubernetes resource
|
||||
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $test_name
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Create a Virtual Machine" {
|
||||
name='test'
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VirtualMachine
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
external: false
|
||||
externalMethod: PortList
|
||||
externalPorts:
|
||||
- 22
|
||||
instanceType: "u1.medium"
|
||||
instanceProfile: ubuntu
|
||||
systemDisk:
|
||||
image: ubuntu
|
||||
storage: 5Gi
|
||||
storageClass: replicated
|
||||
gpus: []
|
||||
resources: {}
|
||||
sshKeys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
|
||||
test@test
|
||||
cloudInit: |
|
||||
#cloud-config
|
||||
users:
|
||||
- name: test
|
||||
shell: /bin/bash
|
||||
sudo: ['ALL=(ALL) NOPASSWD: ALL']
|
||||
groups: sudo
|
||||
ssh_authorized_keys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
|
||||
cloudInitSeed: ""
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
|
||||
kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
|
||||
kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
|
||||
kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
|
||||
timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
|
||||
kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
|
||||
}
|
||||
@@ -1,25 +1,22 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Required installer assets exist" {
|
||||
if [ ! -f _out/assets/cozystack-crds.yaml ]; then
|
||||
echo "Missing: _out/assets/cozystack-crds.yaml" >&2
|
||||
exit 1
|
||||
fi
|
||||
if [ ! -f _out/assets/cozystack-operator.yaml ]; then
|
||||
echo "Missing: _out/assets/cozystack-operator.yaml" >&2
|
||||
@test "Required installer chart exists" {
|
||||
if [ ! -f packages/core/installer/Chart.yaml ]; then
|
||||
echo "Missing: packages/core/installer/Chart.yaml" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Install Cozystack" {
|
||||
# Create namespace
|
||||
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
|
||||
# Install cozy-installer chart (CRDs from crds/ are applied automatically)
|
||||
helm upgrade installer packages/core/installer \
|
||||
--install \
|
||||
--namespace cozy-system \
|
||||
--create-namespace \
|
||||
--wait \
|
||||
--timeout 2m
|
||||
|
||||
# Apply installer manifests (CRDs + operator)
|
||||
kubectl apply -f _out/assets/cozystack-crds.yaml
|
||||
kubectl apply -f _out/assets/cozystack-operator.yaml
|
||||
|
||||
# Wait for the operator deployment to become available
|
||||
# Verify the operator deployment is available
|
||||
kubectl wait deployment/cozystack-operator -n cozy-system --timeout=1m --for=condition=Available
|
||||
|
||||
# Create platform Package with isp-full variant
|
||||
|
||||
@@ -19,12 +19,13 @@ set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
||||
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
|
||||
CODEGEN_PKG=${CODEGEN_PKG:-~/go/pkg/mod/k8s.io/code-generator@v0.34.1}
|
||||
API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${SCRIPT_ROOT}/api/api-rules"}"
|
||||
UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS:-true}"
|
||||
CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"
|
||||
TMPDIR=$(mktemp -d)
|
||||
OPERATOR_CRDDIR=packages/core/installer/definitions
|
||||
OPERATOR_CRDDIR=packages/core/installer/crds
|
||||
OPERATOR_EMBEDDIR=internal/crdinstall/manifests
|
||||
COZY_CONTROLLER_CRDDIR=packages/system/cozystack-controller/definitions
|
||||
COZY_RD_CRDDIR=packages/system/application-definition-crd/definition
|
||||
BACKUPS_CORE_CRDDIR=packages/system/backup-controller/definitions
|
||||
@@ -34,7 +35,7 @@ trap 'rm -rf ${TMPDIR}' EXIT
|
||||
|
||||
source "${CODEGEN_PKG}/kube_codegen.sh"
|
||||
|
||||
THIS_PKG="k8s.io/sample-apiserver"
|
||||
THIS_PKG="github.com/cozystack/cozystack"
|
||||
|
||||
kube::codegen::gen_helpers \
|
||||
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
@@ -60,12 +61,22 @@ kube::codegen::gen_openapi \
|
||||
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
"${SCRIPT_ROOT}/pkg/apis"
|
||||
|
||||
kube::codegen::gen_client \
|
||||
--with-applyconfig \
|
||||
--output-dir "${SCRIPT_ROOT}/pkg/generated" \
|
||||
--output-pkg "${THIS_PKG}/pkg/generated" \
|
||||
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
|
||||
"${SCRIPT_ROOT}/pkg/apis"
|
||||
|
||||
$CONTROLLER_GEN object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
|
||||
$CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:artifacts:config=${TMPDIR}
|
||||
|
||||
mv ${TMPDIR}/cozystack.io_packages.yaml ${OPERATOR_CRDDIR}/cozystack.io_packages.yaml
|
||||
mv ${TMPDIR}/cozystack.io_packagesources.yaml ${OPERATOR_CRDDIR}/cozystack.io_packagesources.yaml
|
||||
|
||||
cp ${OPERATOR_CRDDIR}/cozystack.io_packages.yaml ${OPERATOR_EMBEDDIR}/cozystack.io_packages.yaml
|
||||
cp ${OPERATOR_CRDDIR}/cozystack.io_packagesources.yaml ${OPERATOR_EMBEDDIR}/cozystack.io_packagesources.yaml
|
||||
|
||||
mv ${TMPDIR}/cozystack.io_applicationdefinitions.yaml \
|
||||
${COZY_RD_CRDDIR}/cozystack.io_applicationdefinitions.yaml
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ set -xe
|
||||
version=${VERSION:-$(git describe --tags)}
|
||||
|
||||
gh release upload --clobber $version _out/assets/cozystack-crds.yaml
|
||||
gh release upload --clobber $version _out/assets/cozystack-operator.yaml
|
||||
gh release upload --clobber $version _out/assets/cozystack-operator-talos.yaml
|
||||
gh release upload --clobber $version _out/assets/cozystack-operator-generic.yaml
|
||||
gh release upload --clobber $version _out/assets/cozystack-operator-hosted.yaml
|
||||
gh release upload --clobber $version _out/assets/metal-amd64.iso
|
||||
|
||||
@@ -141,13 +141,13 @@ func TestResolveBackupClass(t *testing.T) {
|
||||
StrategyRef: corev1.TypedLocalObjectReference{
|
||||
APIGroup: stringPtr("strategy.backups.cozystack.io"),
|
||||
Kind: "Velero",
|
||||
Name: "velero-strategy-mysql",
|
||||
Name: "velero-strategy-mariadb",
|
||||
},
|
||||
Application: backupsv1alpha1.ApplicationSelector{
|
||||
Kind: "MySQL",
|
||||
Kind: "MariaDB",
|
||||
},
|
||||
Parameters: map[string]string{
|
||||
"backupStorageLocationName": "mysql-storage",
|
||||
"backupStorageLocationName": "mariadb-storage",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -169,7 +169,7 @@ func TestResolveBackupClass(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "successful resolution - matches MySQL strategy with explicit apiGroup",
|
||||
name: "successful resolution - matches MariaDB strategy with explicit apiGroup",
|
||||
backupClass: &backupsv1alpha1.BackupClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "velero",
|
||||
@@ -180,14 +180,14 @@ func TestResolveBackupClass(t *testing.T) {
|
||||
StrategyRef: corev1.TypedLocalObjectReference{
|
||||
APIGroup: stringPtr("strategy.backups.cozystack.io"),
|
||||
Kind: "Velero",
|
||||
Name: "velero-strategy-mysql",
|
||||
Name: "velero-strategy-mariadb",
|
||||
},
|
||||
Application: backupsv1alpha1.ApplicationSelector{
|
||||
APIGroup: stringPtr("apps.cozystack.io"),
|
||||
Kind: "MySQL",
|
||||
Kind: "MariaDB",
|
||||
},
|
||||
Parameters: map[string]string{
|
||||
"backupStorageLocationName": "mysql-storage",
|
||||
"backupStorageLocationName": "mariadb-storage",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -195,18 +195,18 @@ func TestResolveBackupClass(t *testing.T) {
|
||||
},
|
||||
applicationRef: corev1.TypedLocalObjectReference{
|
||||
APIGroup: stringPtr("apps.cozystack.io"),
|
||||
Kind: "MySQL",
|
||||
Name: "mysql1",
|
||||
Kind: "MariaDB",
|
||||
Name: "mariadb1",
|
||||
},
|
||||
backupClassName: "velero",
|
||||
wantErr: false,
|
||||
expectedStrategyRef: &corev1.TypedLocalObjectReference{
|
||||
APIGroup: stringPtr("strategy.backups.cozystack.io"),
|
||||
Kind: "Velero",
|
||||
Name: "velero-strategy-mysql",
|
||||
Name: "velero-strategy-mariadb",
|
||||
},
|
||||
expectedParams: map[string]string{
|
||||
"backupStorageLocationName": "mysql-storage",
|
||||
"backupStorageLocationName": "mariadb-storage",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/dynamic"
|
||||
@@ -20,8 +21,8 @@ import (
|
||||
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
|
||||
)
|
||||
|
||||
// BackupVeleroStrategyReconciler reconciles BackupJob with a strategy referencing
|
||||
// Velero.strategy.backups.cozystack.io objects.
|
||||
// BackupJobReconciler reconciles BackupJob with a strategy from the
|
||||
// strategy.backups.cozystack.io API group.
|
||||
type BackupJobReconciler struct {
|
||||
client.Client
|
||||
dynamic.Interface
|
||||
@@ -115,3 +116,27 @@ func (r *BackupJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
For(&backupsv1alpha1.BackupJob{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *BackupJobReconciler) markBackupJobFailed(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, message string) (ctrl.Result, error) {
|
||||
logger := getLogger(ctx)
|
||||
now := metav1.Now()
|
||||
backupJob.Status.CompletedAt = &now
|
||||
backupJob.Status.Phase = backupsv1alpha1.BackupJobPhaseFailed
|
||||
backupJob.Status.Message = message
|
||||
|
||||
// Add condition
|
||||
backupJob.Status.Conditions = append(backupJob.Status.Conditions, metav1.Condition{
|
||||
Type: "Ready",
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "BackupFailed",
|
||||
Message: message,
|
||||
LastTransitionTime: now,
|
||||
})
|
||||
|
||||
if err := r.Status().Update(ctx, backupJob); err != nil {
|
||||
logger.Error(err, "failed to update BackupJob status to Failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Debug("BackupJob failed", "message", message)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -67,8 +67,8 @@ func TestBackupJob(t *testing.T) {
|
||||
Spec: backupsv1alpha1.PlanSpec{
|
||||
ApplicationRef: corev1.TypedLocalObjectReference{
|
||||
// No APIGroup specified
|
||||
Kind: "MySQL",
|
||||
Name: "mysql1",
|
||||
Kind: "MariaDB",
|
||||
Name: "mariadb1",
|
||||
},
|
||||
BackupClassName: "velero",
|
||||
Schedule: backupsv1alpha1.PlanSchedule{
|
||||
|
||||
@@ -14,3 +14,8 @@ func (r *BackupJobReconciler) reconcileJob(ctx context.Context, j *backupsv1alph
|
||||
_ = resolved // Use resolved BackupClass parameters when implementing your job strategy
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *RestoreJobReconciler) reconcileJobRestore(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup) (ctrl.Result, error) {
|
||||
_ = log.FromContext(ctx)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
140
internal/backupcontroller/restorejob_controller.go
Normal file
140
internal/backupcontroller/restorejob_controller.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package backupcontroller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
|
||||
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
|
||||
)
|
||||
|
||||
// RestoreJobReconciler reconciles RestoreJob objects.
|
||||
// It routes RestoreJobs to strategy-specific handlers based on the strategy
|
||||
// referenced in the Backup that the RestoreJob is restoring from.
|
||||
type RestoreJobReconciler struct {
|
||||
client.Client
|
||||
dynamic.Interface
|
||||
meta.RESTMapper
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func (r *RestoreJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
logger.Info("reconciling RestoreJob", "namespace", req.Namespace, "name", req.Name)
|
||||
|
||||
restoreJob := &backupsv1alpha1.RestoreJob{}
|
||||
err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, restoreJob)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logger.V(1).Info("RestoreJob not found, skipping")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
logger.Error(err, "failed to get RestoreJob")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// If already completed, no need to reconcile
|
||||
if restoreJob.Status.Phase == backupsv1alpha1.RestoreJobPhaseSucceeded ||
|
||||
restoreJob.Status.Phase == backupsv1alpha1.RestoreJobPhaseFailed {
|
||||
logger.V(1).Info("RestoreJob already completed, skipping", "phase", restoreJob.Status.Phase)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Step 1: Fetch the referenced Backup
|
||||
backup := &backupsv1alpha1.Backup{}
|
||||
backupKey := types.NamespacedName{Namespace: req.Namespace, Name: restoreJob.Spec.BackupRef.Name}
|
||||
if err := r.Get(ctx, backupKey, backup); err != nil {
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("failed to get Backup: %v", err))
|
||||
}
|
||||
|
||||
// Step 2: Determine effective strategy from backup.spec.strategyRef
|
||||
if backup.Spec.StrategyRef.APIGroup == nil {
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, "Backup has nil StrategyRef.APIGroup")
|
||||
}
|
||||
|
||||
if *backup.Spec.StrategyRef.APIGroup != strategyv1alpha1.GroupVersion.Group {
|
||||
return r.markRestoreJobFailed(ctx, restoreJob,
|
||||
fmt.Sprintf("StrategyRef.APIGroup doesn't match: %s", *backup.Spec.StrategyRef.APIGroup))
|
||||
}
|
||||
|
||||
logger.Info("processing RestoreJob", "restorejob", restoreJob.Name, "backup", backup.Name, "strategyKind", backup.Spec.StrategyRef.Kind)
|
||||
switch backup.Spec.StrategyRef.Kind {
|
||||
case strategyv1alpha1.JobStrategyKind:
|
||||
return r.reconcileJobRestore(ctx, restoreJob, backup)
|
||||
case strategyv1alpha1.VeleroStrategyKind:
|
||||
return r.reconcileVeleroRestore(ctx, restoreJob, backup)
|
||||
default:
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("StrategyRef.Kind not supported: %s", backup.Spec.StrategyRef.Kind))
|
||||
}
|
||||
}
|
||||
|
||||
// SetupWithManager registers our controller with the Manager and sets up watches.
|
||||
func (r *RestoreJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
cfg := mgr.GetConfig()
|
||||
var err error
|
||||
if r.Interface, err = dynamic.NewForConfig(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var h *http.Client
|
||||
if h, err = rest.HTTPClientFor(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.RESTMapper, err = apiutil.NewDynamicRESTMapper(cfg, h); err != nil {
|
||||
return err
|
||||
}
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&backupsv1alpha1.RestoreJob{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
// getTargetApplicationRef determines the effective target application reference.
|
||||
// According to DESIGN.md, if spec.targetApplicationRef is omitted, drivers SHOULD
|
||||
// restore into backup.spec.applicationRef.
|
||||
// The returned reference is normalized to ensure APIGroup has a default value.
|
||||
func (r *RestoreJobReconciler) getTargetApplicationRef(restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup) corev1.TypedLocalObjectReference {
|
||||
if restoreJob.Spec.TargetApplicationRef != nil {
|
||||
return backupsv1alpha1.NormalizeApplicationRef(*restoreJob.Spec.TargetApplicationRef)
|
||||
}
|
||||
return backup.Spec.ApplicationRef
|
||||
}
|
||||
|
||||
// markRestoreJobFailed updates the RestoreJob status to Failed with the given message.
|
||||
func (r *RestoreJobReconciler) markRestoreJobFailed(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, message string) (ctrl.Result, error) {
|
||||
logger := getLogger(ctx)
|
||||
now := metav1.Now()
|
||||
restoreJob.Status.CompletedAt = &now
|
||||
restoreJob.Status.Phase = backupsv1alpha1.RestoreJobPhaseFailed
|
||||
restoreJob.Status.Message = message
|
||||
|
||||
// Add condition
|
||||
restoreJob.Status.Conditions = append(restoreJob.Status.Conditions, metav1.Condition{
|
||||
Type: "Ready",
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "RestoreFailed",
|
||||
Message: message,
|
||||
LastTransitionTime: now,
|
||||
})
|
||||
|
||||
if err := r.Status().Update(ctx, restoreJob); err != nil {
|
||||
logger.Error(err, "failed to update RestoreJob status to Failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Debug("RestoreJob failed", "message", message)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
@@ -47,11 +47,14 @@ type S3Credentials struct {
|
||||
}
|
||||
|
||||
const (
|
||||
defaultRequeueAfter = 5 * time.Second
|
||||
defaultActiveJobPollingInterval = defaultRequeueAfter
|
||||
defaultRequeueAfter = 5 * time.Second
|
||||
defaultActiveJobPollingInterval = defaultRequeueAfter
|
||||
defaultRestoreRequeueAfter = 5 * time.Second
|
||||
defaultActiveRestorePollingInterval = defaultRestoreRequeueAfter
|
||||
// Velero requires API objects and secrets to be in the cozy-velero namespace
|
||||
veleroNamespace = "cozy-velero"
|
||||
virtualMachinePrefix = "virtual-machine-"
|
||||
veleroNamespace = "cozy-velero"
|
||||
veleroBackupNameMetadataKey = "velero.io/backup-name"
|
||||
veleroBackupNamespaceMetadataKey = "velero.io/backup-namespace"
|
||||
)
|
||||
|
||||
func boolPtr(b bool) *bool {
|
||||
@@ -100,7 +103,6 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
|
||||
|
||||
// Step 3: Execute backup logic
|
||||
// Check if we already created a Velero Backup
|
||||
// Use human-readable timestamp: YYYY-MM-DD-HH-MM-SS
|
||||
if j.Status.StartedAt == nil {
|
||||
logger.Error(nil, "StartedAt is nil after status update, this should not happen")
|
||||
return ctrl.Result{RequeueAfter: defaultRequeueAfter}, nil
|
||||
@@ -206,30 +208,6 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
|
||||
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
|
||||
}
|
||||
|
||||
func (r *BackupJobReconciler) markBackupJobFailed(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, message string) (ctrl.Result, error) {
|
||||
logger := getLogger(ctx)
|
||||
now := metav1.Now()
|
||||
backupJob.Status.CompletedAt = &now
|
||||
backupJob.Status.Phase = backupsv1alpha1.BackupJobPhaseFailed
|
||||
backupJob.Status.Message = message
|
||||
|
||||
// Add condition
|
||||
backupJob.Status.Conditions = append(backupJob.Status.Conditions, metav1.Condition{
|
||||
Type: "Ready",
|
||||
Status: metav1.ConditionFalse,
|
||||
Reason: "BackupFailed",
|
||||
Message: message,
|
||||
LastTransitionTime: now,
|
||||
})
|
||||
|
||||
if err := r.Status().Update(ctx, backupJob); err != nil {
|
||||
logger.Error(err, "failed to update BackupJob status to Failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Debug("BackupJob failed", "message", message)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, strategy *strategyv1alpha1.Velero, resolved *ResolvedBackupConfig) error {
|
||||
logger := getLogger(ctx)
|
||||
logger.Debug("createVeleroBackup called", "strategy", strategy.Name)
|
||||
@@ -297,8 +275,8 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
|
||||
|
||||
// Extract driver metadata (e.g., Velero backup name)
|
||||
driverMetadata := map[string]string{
|
||||
"velero.io/backup-name": veleroBackup.Name,
|
||||
"velero.io/backup-namespace": veleroBackup.Namespace,
|
||||
veleroBackupNameMetadataKey: veleroBackup.Name,
|
||||
veleroBackupNamespaceMetadataKey: veleroBackup.Namespace,
|
||||
}
|
||||
|
||||
// Create a basic artifact referencing the Velero backup
|
||||
@@ -344,3 +322,178 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
|
||||
logger.Debug("created Backup resource", "name", backup.Name)
|
||||
return backup, nil
|
||||
}
|
||||
|
||||
// reconcileVeleroRestore handles restore operations for Velero strategy.
|
||||
func (r *RestoreJobReconciler) reconcileVeleroRestore(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup) (ctrl.Result, error) {
|
||||
logger := getLogger(ctx)
|
||||
logger.Debug("reconciling Velero strategy restore", "restorejob", restoreJob.Name, "backup", backup.Name)
|
||||
|
||||
// Step 1: On first reconcile, set startedAt and phase = Running
|
||||
if restoreJob.Status.StartedAt == nil {
|
||||
logger.Debug("setting RestoreJob StartedAt and phase to Running")
|
||||
now := metav1.Now()
|
||||
restoreJob.Status.StartedAt = &now
|
||||
restoreJob.Status.Phase = backupsv1alpha1.RestoreJobPhaseRunning
|
||||
if err := r.Status().Update(ctx, restoreJob); err != nil {
|
||||
logger.Error(err, "failed to update RestoreJob status")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{RequeueAfter: defaultRestoreRequeueAfter}, nil
|
||||
}
|
||||
|
||||
// Step 2: Resolve inputs - Read Strategy, Storage, target Application
|
||||
logger.Debug("fetching Velero strategy", "strategyName", backup.Spec.StrategyRef.Name)
|
||||
veleroStrategy := &strategyv1alpha1.Velero{}
|
||||
if err := r.Get(ctx, client.ObjectKey{Name: backup.Spec.StrategyRef.Name}, veleroStrategy); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
logger.Error(err, "Velero strategy not found", "strategyName", backup.Spec.StrategyRef.Name)
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("Velero strategy not found: %s", backup.Spec.StrategyRef.Name))
|
||||
}
|
||||
logger.Error(err, "failed to get Velero strategy")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Debug("fetched Velero strategy", "strategyName", veleroStrategy.Name)
|
||||
|
||||
// Get Velero backup name from Backup's driverMetadata
|
||||
veleroBackupName, ok := backup.Spec.DriverMetadata[veleroBackupNameMetadataKey]
|
||||
if !ok {
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("Backup missing Velero backup name in driverMetadata (key: %s)", veleroBackupNameMetadataKey))
|
||||
}
|
||||
|
||||
// Step 3: Execute restore logic
|
||||
// Check if we already created a Velero Restore
|
||||
logger.Debug("checking for existing Velero Restore", "namespace", veleroNamespace)
|
||||
veleroRestoreList := &velerov1.RestoreList{}
|
||||
opts := []client.ListOption{
|
||||
client.InNamespace(veleroNamespace),
|
||||
client.MatchingLabels{
|
||||
backupsv1alpha1.OwningJobNameLabel: restoreJob.Name,
|
||||
backupsv1alpha1.OwningJobNamespaceLabel: restoreJob.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
if err := r.List(ctx, veleroRestoreList, opts...); err != nil {
|
||||
logger.Error(err, "failed to get Velero Restore")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if len(veleroRestoreList.Items) == 0 {
|
||||
// Create Velero Restore
|
||||
logger.Debug("Velero Restore not found, creating new one")
|
||||
if err := r.createVeleroRestore(ctx, restoreJob, backup, veleroStrategy, veleroBackupName); err != nil {
|
||||
logger.Error(err, "failed to create Velero Restore")
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("failed to create Velero Restore: %v", err))
|
||||
}
|
||||
logger.Debug("created Velero Restore, requeuing")
|
||||
// Requeue to check status
|
||||
return ctrl.Result{RequeueAfter: defaultRestoreRequeueAfter}, nil
|
||||
}
|
||||
|
||||
if len(veleroRestoreList.Items) > 1 {
|
||||
logger.Error(fmt.Errorf("too many Velero restores for RestoreJob"), "found more than one Velero Restore referencing a single RestoreJob as owner")
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, "found multiple Velero Restores for this RestoreJob")
|
||||
}
|
||||
|
||||
veleroRestore := veleroRestoreList.Items[0].DeepCopy()
|
||||
logger.Debug("found existing Velero Restore", "phase", veleroRestore.Status.Phase)
|
||||
|
||||
// Check Velero Restore status
|
||||
phase := string(veleroRestore.Status.Phase)
|
||||
if phase == "" {
|
||||
// Still in progress, requeue
|
||||
return ctrl.Result{RequeueAfter: defaultActiveRestorePollingInterval}, nil
|
||||
}
|
||||
|
||||
// Step 4: On success
|
||||
if phase == "Completed" {
|
||||
now := metav1.Now()
|
||||
restoreJob.Status.CompletedAt = &now
|
||||
restoreJob.Status.Phase = backupsv1alpha1.RestoreJobPhaseSucceeded
|
||||
if err := r.Status().Update(ctx, restoreJob); err != nil {
|
||||
logger.Error(err, "failed to update RestoreJob status")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
logger.Debug("RestoreJob succeeded")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Step 5: On failure
|
||||
if phase == "Failed" || phase == "PartiallyFailed" {
|
||||
message := fmt.Sprintf("Velero Restore failed with phase: %s", phase)
|
||||
if veleroRestore.Status.FailureReason != "" {
|
||||
message = fmt.Sprintf("%s: %s", message, veleroRestore.Status.FailureReason)
|
||||
}
|
||||
return r.markRestoreJobFailed(ctx, restoreJob, message)
|
||||
}
|
||||
|
||||
// Still in progress (InProgress, New, etc.)
|
||||
return ctrl.Result{RequeueAfter: defaultRestoreRequeueAfter}, nil
|
||||
}
|
||||
|
||||
// createVeleroRestore creates a Velero Restore resource.
|
||||
func (r *RestoreJobReconciler) createVeleroRestore(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup, strategy *strategyv1alpha1.Velero, veleroBackupName string) error {
|
||||
logger := getLogger(ctx)
|
||||
logger.Debug("createVeleroRestore called", "strategy", strategy.Name, "veleroBackupName", veleroBackupName)
|
||||
|
||||
// Determine target application reference
|
||||
targetAppRef := r.getTargetApplicationRef(restoreJob, backup)
|
||||
|
||||
// Get the target application object for templating
|
||||
mapping, err := r.RESTMapping(schema.GroupKind{Group: *targetAppRef.APIGroup, Kind: targetAppRef.Kind})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get REST mapping for target application: %w", err)
|
||||
}
|
||||
ns := restoreJob.Namespace
|
||||
if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
|
||||
ns = ""
|
||||
}
|
||||
app, err := r.Resource(mapping.Resource).Namespace(ns).Get(ctx, targetAppRef.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get target application: %w", err)
|
||||
}
|
||||
|
||||
// Build template context
|
||||
templateContext := map[string]interface{}{
|
||||
"Application": app.Object,
|
||||
// TODO: Parameters are not currently stored on Backup, so they're unavailable during restore.
|
||||
// This is a design limitation that should be addressed by persisting Parameters on the Backup object.
|
||||
"Parameters": map[string]string{},
|
||||
}
|
||||
|
||||
// Template the restore spec from the strategy, or use defaults if not specified
|
||||
var veleroRestoreSpec velerov1.RestoreSpec
|
||||
if strategy.Spec.Template.RestoreSpec != nil {
|
||||
templatedSpec, err := template.Template(strategy.Spec.Template.RestoreSpec, templateContext)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to template Velero Restore spec: %w", err)
|
||||
}
|
||||
veleroRestoreSpec = *templatedSpec
|
||||
}
|
||||
|
||||
// Set the backupName in the spec (required by Velero)
|
||||
veleroRestoreSpec.BackupName = veleroBackupName
|
||||
|
||||
generateName := fmt.Sprintf("%s.%s-", restoreJob.Namespace, restoreJob.Name)
|
||||
veleroRestore := &velerov1.Restore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: generateName,
|
||||
Namespace: veleroNamespace,
|
||||
Labels: map[string]string{
|
||||
backupsv1alpha1.OwningJobNameLabel: restoreJob.Name,
|
||||
backupsv1alpha1.OwningJobNamespaceLabel: restoreJob.Namespace,
|
||||
},
|
||||
},
|
||||
Spec: veleroRestoreSpec,
|
||||
}
|
||||
if err := r.Create(ctx, veleroRestore); err != nil {
|
||||
logger.Error(err, "failed to create Velero Restore", "generateName", generateName)
|
||||
r.Recorder.Event(restoreJob, corev1.EventTypeWarning, "VeleroRestoreCreationFailed",
|
||||
fmt.Sprintf("Failed to create Velero Restore %s/%s: %v", veleroNamespace, generateName, err))
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Debug("created Velero Restore", "name", veleroRestore.Name, "namespace", veleroRestore.Namespace)
|
||||
r.Recorder.Event(restoreJob, corev1.EventTypeNormal, "VeleroRestoreCreated",
|
||||
fmt.Sprintf("Created Velero Restore %s/%s", veleroNamespace, veleroRestore.Name))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -32,8 +32,6 @@ type ApplicationDefinitionReconciler struct {
|
||||
mu sync.Mutex
|
||||
lastEvent time.Time
|
||||
lastHandled time.Time
|
||||
|
||||
CozystackAPIKind string
|
||||
}
|
||||
|
||||
func (r *ApplicationDefinitionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
@@ -67,7 +65,7 @@ func (r *ApplicationDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) err
|
||||
}
|
||||
|
||||
type appDefHashView struct {
|
||||
Name string `json:"name"`
|
||||
Name string `json:"name"`
|
||||
Spec cozyv1alpha1.ApplicationDefinitionSpec `json:"spec"`
|
||||
}
|
||||
|
||||
@@ -155,23 +153,13 @@ func (r *ApplicationDefinitionReconciler) getWorkload(
|
||||
ctx context.Context,
|
||||
key types.NamespacedName,
|
||||
) (tpl *corev1.PodTemplateSpec, obj client.Object, patch client.Patch, err error) {
|
||||
if r.CozystackAPIKind == "Deployment" {
|
||||
dep := &appsv1.Deployment{}
|
||||
if err := r.Get(ctx, key, dep); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
obj = dep
|
||||
tpl = &dep.Spec.Template
|
||||
patch = client.MergeFrom(dep.DeepCopy())
|
||||
} else {
|
||||
ds := &appsv1.DaemonSet{}
|
||||
if err := r.Get(ctx, key, ds); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
obj = ds
|
||||
tpl = &ds.Spec.Template
|
||||
patch = client.MergeFrom(ds.DeepCopy())
|
||||
dep := &appsv1.Deployment{}
|
||||
if err := r.Get(ctx, key, dep); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
obj = dep
|
||||
tpl = &dep.Spec.Template
|
||||
patch = client.MergeFrom(dep.DeepCopy())
|
||||
if tpl.Annotations == nil {
|
||||
tpl.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
@@ -33,12 +33,12 @@ func (m *Manager) ensureBreadcrumb(ctx context.Context, crd *cozyv1alpha1.Applic
|
||||
|
||||
key := plural // e.g., "virtualmachines"
|
||||
label := labelPlural
|
||||
link := fmt.Sprintf("/openapi-ui/{clusterName}/{namespace}/api-table/%s/%s/%s", strings.ToLower(group), strings.ToLower(version), plural)
|
||||
link := fmt.Sprintf("/openapi-ui/{cluster}/{namespace}/api-table/%s/%s/%s", strings.ToLower(group), strings.ToLower(version), plural)
|
||||
// If this is a module, change the first breadcrumb item to "Tenant Modules"
|
||||
if crd.Spec.Dashboard != nil && crd.Spec.Dashboard.Module {
|
||||
key = "tenantmodules"
|
||||
label = "Tenant Modules"
|
||||
link = "/openapi-ui/{clusterName}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules"
|
||||
link = "/openapi-ui/{cluster}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules"
|
||||
}
|
||||
|
||||
items := []any{
|
||||
|
||||
@@ -84,6 +84,53 @@ func (m *Manager) ensureCustomFormsOverride(ctx context.Context, crd *cozyv1alph
|
||||
return err
|
||||
}
|
||||
|
||||
// ensureCFOMapping updates the CFOMapping resource to include a mapping for the given CRD
|
||||
func (m *Manager) ensureCFOMapping(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
|
||||
g, v, kind := pickGVK(crd)
|
||||
plural := pickPlural(kind, crd)
|
||||
|
||||
resourcePath := fmt.Sprintf("/%s/%s/%s", g, v, plural)
|
||||
customizationID := fmt.Sprintf("default-%s", resourcePath)
|
||||
|
||||
obj := &dashv1alpha1.CFOMapping{}
|
||||
obj.SetName("cfomapping")
|
||||
|
||||
_, err := controllerutil.CreateOrUpdate(ctx, m.Client, obj, func() error {
|
||||
// Parse existing mappings
|
||||
mappings := make(map[string]string)
|
||||
if obj.Spec.JSON.Raw != nil {
|
||||
var spec map[string]any
|
||||
if err := json.Unmarshal(obj.Spec.JSON.Raw, &spec); err == nil {
|
||||
if m, ok := spec["mappings"].(map[string]any); ok {
|
||||
for k, val := range m {
|
||||
if s, ok := val.(string); ok {
|
||||
mappings[k] = s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add/update the mapping for this CRD
|
||||
mappings[resourcePath] = customizationID
|
||||
|
||||
specData := map[string]any{
|
||||
"mappings": mappings,
|
||||
}
|
||||
b, err := json.Marshal(specData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newSpec := dashv1alpha1.ArbitrarySpec{JSON: apiextv1.JSON{Raw: b}}
|
||||
if !compareArbitrarySpecs(obj.Spec, newSpec) {
|
||||
obj.Spec = newSpec
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// buildMultilineStringSchema parses OpenAPI schema and creates schema with multilineString
|
||||
// for all string fields inside spec that don't have enum
|
||||
func buildMultilineStringSchema(openAPISchema string) (map[string]any, error) {
|
||||
|
||||
@@ -47,7 +47,7 @@ func (m *Manager) ensureFactory(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
if prefix, ok := vncTabPrefix(kind); ok {
|
||||
tabs = append(tabs, vncTab(prefix))
|
||||
}
|
||||
tabs = append(tabs, yamlTab(plural))
|
||||
tabs = append(tabs, yamlTab(g, v, plural))
|
||||
|
||||
// Use unified factory creation
|
||||
config := UnifiedResourceConfig{
|
||||
@@ -160,11 +160,11 @@ func detailsTab(kind, endpoint, schemaJSON string, keysOrder [][]string) map[str
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "vpc-subnets-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "virtualprivatecloud-subnets",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/configmaps",
|
||||
"id": "vpc-subnets-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"cluster": "{2}",
|
||||
"customizationId": "virtualprivatecloud-subnets",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/configmaps",
|
||||
"fieldSelector": map[string]any{
|
||||
"metadata.name": "virtualprivatecloud-{6}-subnets",
|
||||
},
|
||||
@@ -188,12 +188,12 @@ func detailsTab(kind, endpoint, schemaJSON string, keysOrder [][]string) map[str
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "resource-quotas-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "factory-resource-quotas",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{reqsJsonPath[0]['.status.namespace']}/resourcequotas",
|
||||
"pathToItems": []any{`items`},
|
||||
"id": "resource-quotas-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"cluster": "{2}",
|
||||
"customizationId": "factory-resource-quotas",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{reqsJsonPath[0]['.status.namespace']}/resourcequotas",
|
||||
"pathToItems": []any{`items`},
|
||||
},
|
||||
},
|
||||
}),
|
||||
@@ -242,13 +242,13 @@ func detailsTab(kind, endpoint, schemaJSON string, keysOrder [][]string) map[str
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "conditions-table",
|
||||
"fetchUrl": endpoint,
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "factory-status-conditions",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"withoutControls": true,
|
||||
"pathToItems": []any{"status", "conditions"},
|
||||
"id": "conditions-table",
|
||||
"fetchUrl": endpoint,
|
||||
"cluster": "{2}",
|
||||
"customizationId": "factory-status-conditions",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"withoutControls": true,
|
||||
"pathToItems": []any{"status", "conditions"},
|
||||
},
|
||||
},
|
||||
}),
|
||||
@@ -264,12 +264,12 @@ func workloadsTab(kind string) map[string]any {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "workloads-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/cozystack.io/v1alpha1/namespaces/{3}/workloadmonitors",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-v1alpha1.cozystack.io.workloadmonitors",
|
||||
"pathToItems": []any{"items"},
|
||||
"id": "workloads-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/cozystack.io/v1alpha1/namespaces/{3}/workloadmonitors",
|
||||
"cluster": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-v1alpha1.cozystack.io.workloadmonitors",
|
||||
"pathToItems": []any{"items"},
|
||||
"labelSelector": map[string]any{
|
||||
"apps.cozystack.io/application.group": "apps.cozystack.io",
|
||||
"apps.cozystack.io/application.kind": kind,
|
||||
@@ -289,12 +289,12 @@ func servicesTab(kind string) map[string]any {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "services-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/services",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-v1.services",
|
||||
"pathToItems": []any{"items"},
|
||||
"id": "services-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/services",
|
||||
"cluster": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-v1.services",
|
||||
"pathToItems": []any{"items"},
|
||||
"labelSelector": map[string]any{
|
||||
"apps.cozystack.io/application.group": "apps.cozystack.io",
|
||||
"apps.cozystack.io/application.kind": kind,
|
||||
@@ -315,12 +315,12 @@ func ingressesTab(kind string) map[string]any {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "ingresses-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/networking.k8s.io/v1/namespaces/{3}/ingresses",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-networking.k8s.io.v1.ingresses",
|
||||
"pathToItems": []any{"items"},
|
||||
"id": "ingresses-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/networking.k8s.io/v1/namespaces/{3}/ingresses",
|
||||
"cluster": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-networking.k8s.io.v1.ingresses",
|
||||
"pathToItems": []any{"items"},
|
||||
"labelSelector": map[string]any{
|
||||
"apps.cozystack.io/application.group": "apps.cozystack.io",
|
||||
"apps.cozystack.io/application.kind": kind,
|
||||
@@ -341,12 +341,12 @@ func secretsTab(kind string) map[string]any {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "secrets-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/core.cozystack.io/v1alpha1/namespaces/{3}/tenantsecrets",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-v1alpha1.core.cozystack.io.tenantsecrets",
|
||||
"pathToItems": []any{"items"},
|
||||
"id": "secrets-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/core.cozystack.io/v1alpha1/namespaces/{3}/tenantsecrets",
|
||||
"cluster": "{2}",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"customizationId": "factory-details-v1alpha1.core.cozystack.io.tenantsecrets",
|
||||
"pathToItems": []any{"items"},
|
||||
"labelSelector": map[string]any{
|
||||
"apps.cozystack.io/application.group": "apps.cozystack.io",
|
||||
"apps.cozystack.io/application.kind": kind,
|
||||
@@ -358,7 +358,7 @@ func secretsTab(kind string) map[string]any {
|
||||
}
|
||||
}
|
||||
|
||||
func yamlTab(plural string) map[string]any {
|
||||
func yamlTab(group, version, plural string) map[string]any {
|
||||
return map[string]any{
|
||||
"key": "yaml",
|
||||
"label": "YAML",
|
||||
@@ -369,8 +369,10 @@ func yamlTab(plural string) map[string]any {
|
||||
"id": "yaml-editor",
|
||||
"cluster": "{2}",
|
||||
"isNameSpaced": true,
|
||||
"type": "builtin",
|
||||
"typeName": plural,
|
||||
"type": "apis",
|
||||
"apiGroup": group,
|
||||
"apiVersion": version,
|
||||
"plural": plural,
|
||||
"prefillValuesRequestIndex": float64(0),
|
||||
"readOnly": true,
|
||||
"substractHeight": float64(400),
|
||||
|
||||
@@ -132,6 +132,10 @@ func (m *Manager) EnsureForAppDef(ctx context.Context, crd *cozyv1alpha1.Applica
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := m.ensureCFOMapping(ctx, crd); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := m.ensureSidebar(ctx, crd); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -139,6 +143,10 @@ func (m *Manager) EnsureForAppDef(ctx context.Context, crd *cozyv1alpha1.Applica
|
||||
if err := m.ensureFactory(ctx, crd); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if err := m.ensureNavigation(ctx, crd); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -74,7 +74,7 @@ func (m *Manager) ensureMarketplacePanel(ctx context.Context, crd *cozyv1alpha1.
|
||||
"type": "nonCrd",
|
||||
"apiGroup": "apps.cozystack.io",
|
||||
"apiVersion": "v1alpha1",
|
||||
"typeName": app.Plural, // e.g., "buckets"
|
||||
"plural": app.Plural, // e.g., "buckets"
|
||||
"disabled": false,
|
||||
"hidden": false,
|
||||
"tags": tags,
|
||||
|
||||
69
internal/controller/dashboard/navigation.go
Normal file
69
internal/controller/dashboard/navigation.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
dashv1alpha1 "github.com/cozystack/cozystack/api/dashboard/v1alpha1"
|
||||
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
|
||||
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
)
|
||||
|
||||
// ensureNavigation updates the Navigation resource to include a baseFactoriesMapping entry for the given CRD
|
||||
func (m *Manager) ensureNavigation(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
|
||||
g, v, kind := pickGVK(crd)
|
||||
plural := pickPlural(kind, crd)
|
||||
|
||||
lowerKind := strings.ToLower(kind)
|
||||
factoryKey := fmt.Sprintf("%s-details", lowerKind)
|
||||
|
||||
// All CRD resources are namespaced API resources
|
||||
mappingKey := fmt.Sprintf("base-factory-namespaced-api-%s-%s-%s", g, v, plural)
|
||||
|
||||
obj := &dashv1alpha1.Navigation{}
|
||||
obj.SetName("navigation")
|
||||
|
||||
_, err := controllerutil.CreateOrUpdate(ctx, m.Client, obj, func() error {
|
||||
// Parse existing spec
|
||||
spec := make(map[string]any)
|
||||
if obj.Spec.JSON.Raw != nil {
|
||||
if err := json.Unmarshal(obj.Spec.JSON.Raw, &spec); err != nil {
|
||||
spec = make(map[string]any)
|
||||
}
|
||||
}
|
||||
|
||||
// Get or create baseFactoriesMapping
|
||||
var mappings map[string]string
|
||||
if existing, ok := spec["baseFactoriesMapping"].(map[string]any); ok {
|
||||
mappings = make(map[string]string, len(existing))
|
||||
for k, val := range existing {
|
||||
if s, ok := val.(string); ok {
|
||||
mappings[k] = s
|
||||
}
|
||||
}
|
||||
} else {
|
||||
mappings = make(map[string]string)
|
||||
}
|
||||
|
||||
// Add/update the mapping for this CRD
|
||||
mappings[mappingKey] = factoryKey
|
||||
|
||||
spec["baseFactoriesMapping"] = mappings
|
||||
|
||||
b, err := json.Marshal(spec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newSpec := dashv1alpha1.ArbitrarySpec{JSON: apiextv1.JSON{Raw: b}}
|
||||
if !compareArbitrarySpecs(obj.Spec, newSpec) {
|
||||
obj.Spec = newSpec
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
@@ -22,8 +22,8 @@ import (
|
||||
//
|
||||
// Menu rules:
|
||||
// - The first section is "Marketplace" with two hardcoded entries:
|
||||
// - Marketplace (/openapi-ui/{clusterName}/{namespace}/factory/marketplace)
|
||||
// - Tenant Info (/openapi-ui/{clusterName}/{namespace}/factory/info-details/info)
|
||||
// - Marketplace (/openapi-ui/{cluster}/{namespace}/factory/marketplace)
|
||||
// - Tenant Info (/openapi-ui/{cluster}/{namespace}/factory/info-details/info)
|
||||
// - All other sections are built from CRDs where spec.dashboard != nil.
|
||||
// - Categories are ordered strictly as:
|
||||
// Marketplace, IaaS, PaaS, NaaS, <others A→Z>, Resources, Backups, Administration
|
||||
@@ -91,7 +91,7 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
// Weight (default 0)
|
||||
weight := def.Spec.Dashboard.Weight
|
||||
|
||||
link := fmt.Sprintf("/openapi-ui/{clusterName}/{namespace}/api-table/%s/%s/%s", g, v, plural)
|
||||
link := fmt.Sprintf("/openapi-ui/{cluster}/{namespace}/api-table/%s/%s/%s", g, v, plural)
|
||||
|
||||
categories[cat] = append(categories[cat], item{
|
||||
Key: plural,
|
||||
@@ -146,7 +146,7 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
map[string]any{
|
||||
"key": "marketplace",
|
||||
"label": "Marketplace",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/factory/marketplace",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/factory/marketplace",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -205,12 +205,12 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
map[string]any{
|
||||
"key": "info",
|
||||
"label": "Info",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/factory/info-details/info",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/factory/info-details/info",
|
||||
},
|
||||
map[string]any{
|
||||
"key": "modules",
|
||||
"label": "Modules",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules",
|
||||
},
|
||||
map[string]any{
|
||||
"key": "loadbalancer-services",
|
||||
@@ -220,7 +220,7 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
map[string]any{
|
||||
"key": "tenants",
|
||||
"label": "Tenants",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/apps.cozystack.io/v1alpha1/tenants",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/api-table/apps.cozystack.io/v1alpha1/tenants",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
@@ -1134,7 +1134,7 @@ func yamlEditor(id, cluster string, isNameSpaced bool, typeName string, prefillV
|
||||
"cluster": cluster,
|
||||
"isNameSpaced": isNameSpaced,
|
||||
"type": "builtin",
|
||||
"typeName": typeName,
|
||||
"plural": typeName,
|
||||
"prefillValuesRequestIndex": prefillValuesRequestIndex,
|
||||
"substractHeight": float64(400),
|
||||
},
|
||||
|
||||
@@ -49,6 +49,8 @@ func (m *Manager) ensureStaticResource(ctx context.Context, obj client.Object) e
|
||||
resource.(*dashv1alpha1.Navigation).Spec = o.Spec
|
||||
case *dashv1alpha1.TableUriMapping:
|
||||
resource.(*dashv1alpha1.TableUriMapping).Spec = o.Spec
|
||||
case *dashv1alpha1.CFOMapping:
|
||||
resource.(*dashv1alpha1.CFOMapping).Spec = o.Spec
|
||||
}
|
||||
// Ensure labels are always set
|
||||
m.addDashboardLabels(resource, nil, ResourceTypeStatic)
|
||||
|
||||
@@ -17,111 +17,111 @@ func CreateAllBreadcrumbs() []*dashboardv1alpha1.Breadcrumb {
|
||||
return []*dashboardv1alpha1.Breadcrumb{
|
||||
// Stock project factory configmap details
|
||||
createBreadcrumb("stock-project-factory-configmap-details", []map[string]any{
|
||||
createBreadcrumbItem("configmaps", "v1/configmaps", "/openapi-ui/{clusterName}/{namespace}/builtin-table/configmaps"),
|
||||
createBreadcrumbItem("configmaps", "v1/configmaps", "/openapi-ui/{cluster}/{namespace}/builtin-table/configmaps"),
|
||||
createBreadcrumbItem("configmap", "{6}"),
|
||||
}),
|
||||
|
||||
// Stock cluster factory namespace details
|
||||
createBreadcrumb("stock-cluster-factory-namespace-details", []map[string]any{
|
||||
createBreadcrumbItem("namespaces", "v1/namespaces", "/openapi-ui/{clusterName}/builtin-table/namespaces"),
|
||||
createBreadcrumbItem("namespaces", "v1/namespaces", "/openapi-ui/{cluster}/builtin-table/namespaces"),
|
||||
createBreadcrumbItem("namespace", "{5}"),
|
||||
}),
|
||||
|
||||
// Stock cluster factory node details
|
||||
createBreadcrumb("stock-cluster-factory-node-details", []map[string]any{
|
||||
createBreadcrumbItem("node", "v1/nodes", "/openapi-ui/{clusterName}/builtin-table/nodes"),
|
||||
createBreadcrumbItem("node", "v1/nodes", "/openapi-ui/{cluster}/builtin-table/nodes"),
|
||||
createBreadcrumbItem("node", "{5}"),
|
||||
}),
|
||||
|
||||
// Stock project factory pod details
|
||||
createBreadcrumb("stock-project-factory-pod-details", []map[string]any{
|
||||
createBreadcrumbItem("pods", "v1/pods", "/openapi-ui/{clusterName}/{namespace}/builtin-table/pods"),
|
||||
createBreadcrumbItem("pods", "v1/pods", "/openapi-ui/{cluster}/{namespace}/builtin-table/pods"),
|
||||
createBreadcrumbItem("pod", "{6}"),
|
||||
}),
|
||||
|
||||
// Stock project factory secret details
|
||||
createBreadcrumb("stock-project-factory-kube-secret-details", []map[string]any{
|
||||
createBreadcrumbItem("secrets", "v1/secrets", "/openapi-ui/{clusterName}/{namespace}/builtin-table/secrets"),
|
||||
createBreadcrumbItem("secrets", "v1/secrets", "/openapi-ui/{cluster}/{namespace}/builtin-table/secrets"),
|
||||
createBreadcrumbItem("secret", "{6}"),
|
||||
}),
|
||||
|
||||
// Stock project factory service details
|
||||
createBreadcrumb("stock-project-factory-kube-service-details", []map[string]any{
|
||||
createBreadcrumbItem("services", "v1/services", "/openapi-ui/{clusterName}/{namespace}/builtin-table/services"),
|
||||
createBreadcrumbItem("services", "v1/services", "/openapi-ui/{cluster}/{namespace}/builtin-table/services"),
|
||||
createBreadcrumbItem("service", "{6}"),
|
||||
}),
|
||||
|
||||
// Stock project factory ingress details
|
||||
createBreadcrumb("stock-project-factory-kube-ingress-details", []map[string]any{
|
||||
createBreadcrumbItem("ingresses", "networking.k8s.io/v1/ingresses", "/openapi-ui/{clusterName}/{namespace}/builtin-table/ingresses"),
|
||||
createBreadcrumbItem("ingresses", "networking.k8s.io/v1/ingresses", "/openapi-ui/{cluster}/{namespace}/builtin-table/ingresses"),
|
||||
createBreadcrumbItem("ingress", "{6}"),
|
||||
}),
|
||||
|
||||
// Stock cluster api table
|
||||
createBreadcrumb("stock-cluster-api-table", []map[string]any{
|
||||
createBreadcrumbItem("api", "{apiGroup}/{apiVersion}/{typeName}"),
|
||||
createBreadcrumbItem("api", "{apiGroup}/{apiVersion}/{plural}"),
|
||||
}),
|
||||
|
||||
// Stock cluster api form
|
||||
createBreadcrumb("stock-cluster-api-form", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{typeName}", "/openapi-ui/{clusterName}/api-table/{apiGroup}/{apiVersion}/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{plural}", "/openapi-ui/{cluster}/api-table/{apiGroup}/{apiVersion}/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Create"),
|
||||
}),
|
||||
|
||||
// Stock cluster api form edit
|
||||
createBreadcrumb("stock-cluster-api-form-edit", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{typeName}", "/openapi-ui/{clusterName}/api-table/{apiGroup}/{apiVersion}/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{plural}", "/openapi-ui/{cluster}/api-table/{apiGroup}/{apiVersion}/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Update"),
|
||||
}),
|
||||
|
||||
// Stock cluster builtin table
|
||||
createBreadcrumb("stock-cluster-builtin-table", []map[string]any{
|
||||
createBreadcrumbItem("api", "v1/{typeName}"),
|
||||
createBreadcrumbItem("api", "v1/{plural}"),
|
||||
}),
|
||||
|
||||
// Stock cluster builtin form
|
||||
createBreadcrumb("stock-cluster-builtin-form", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{typeName}", "/openapi-ui/{clusterName}/builtin-table/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{plural}", "/openapi-ui/{cluster}/builtin-table/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Create"),
|
||||
}),
|
||||
|
||||
// Stock cluster builtin form edit
|
||||
createBreadcrumb("stock-cluster-builtin-form-edit", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{typeName}", "/openapi-ui/{clusterName}/builtin-table/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{plural}", "/openapi-ui/{cluster}/builtin-table/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Update"),
|
||||
}),
|
||||
|
||||
// Stock project api table
|
||||
createBreadcrumb("stock-project-api-table", []map[string]any{
|
||||
createBreadcrumbItem("api", "{apiGroup}/{apiVersion}/{typeName}"),
|
||||
createBreadcrumbItem("api", "{apiGroup}/{apiVersion}/{plural}"),
|
||||
}),
|
||||
|
||||
// Stock project api form
|
||||
createBreadcrumb("stock-project-api-form", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{typeName}", "/openapi-ui/{clusterName}/{namespace}/api-table/{apiGroup}/{apiVersion}/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{plural}", "/openapi-ui/{cluster}/{namespace}/api-table/{apiGroup}/{apiVersion}/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Create"),
|
||||
}),
|
||||
|
||||
// Stock project api form edit
|
||||
createBreadcrumb("stock-project-api-form-edit", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{typeName}", "/openapi-ui/{clusterName}/{namespace}/api-table/{apiGroup}/{apiVersion}/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "{apiGroup}/{apiVersion}/{plural}", "/openapi-ui/{cluster}/{namespace}/api-table/{apiGroup}/{apiVersion}/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Update"),
|
||||
}),
|
||||
|
||||
// Stock project builtin table
|
||||
createBreadcrumb("stock-project-builtin-table", []map[string]any{
|
||||
createBreadcrumbItem("api", "v1/{typeName}"),
|
||||
createBreadcrumbItem("api", "v1/{plural}"),
|
||||
}),
|
||||
|
||||
// Stock project builtin form
|
||||
createBreadcrumb("stock-project-builtin-form", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{typeName}", "/openapi-ui/{clusterName}/{namespace}/builtin-table/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{plural}", "/openapi-ui/{cluster}/{namespace}/builtin-table/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Create"),
|
||||
}),
|
||||
|
||||
// Stock project builtin form edit
|
||||
createBreadcrumb("stock-project-builtin-form-edit", []map[string]any{
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{typeName}", "/openapi-ui/{clusterName}/{namespace}/builtin-table/{typeName}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-table", "v1/{plural}", "/openapi-ui/{cluster}/{namespace}/builtin-table/{plural}"),
|
||||
createBreadcrumbItem("create-api-res-namespaced-typename", "Update"),
|
||||
}),
|
||||
}
|
||||
@@ -495,6 +495,27 @@ func CreateAllCustomFormsOverrides() []*dashboardv1alpha1.CustomFormsOverride {
|
||||
createFormItem("spec.ports", "Ports", "array"),
|
||||
},
|
||||
}),
|
||||
|
||||
// Plans form override - backups.cozystack.io/v1alpha1
|
||||
createCustomFormsOverride("default-/backups.cozystack.io/v1alpha1/plans", map[string]any{
|
||||
"formItems": []any{
|
||||
createFormItem("metadata.name", "Name", "text"),
|
||||
createFormItem("metadata.namespace", "Namespace", "text"),
|
||||
createFormItem("spec.applicationRef.kind", "Application Kind", "text"),
|
||||
createFormItem("spec.applicationRef.name", "Application Name", "text"),
|
||||
createFormItemWithAPI("spec.backupClassName", "Backup Class", "select", map[string]any{
|
||||
"api": map[string]any{
|
||||
"fetchUrl": "/api/clusters/{clusterName}/k8s/apis/backups.cozystack.io/v1alpha1/backupclasses",
|
||||
"pathToItems": []any{"items"},
|
||||
"pathToValue": []any{"metadata", "name"},
|
||||
"pathToLabel": []any{"metadata", "name"},
|
||||
"clusterNameVar": "clusterName",
|
||||
},
|
||||
}),
|
||||
createFormItem("spec.schedule.type", "Schedule Type", "text"),
|
||||
createFormItem("spec.schedule.cron", "Schedule Cron", "text"),
|
||||
},
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -514,14 +535,14 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
}, []any{
|
||||
map[string]any{
|
||||
"data": map[string]any{
|
||||
"baseApiVersion": "v1alpha1",
|
||||
"baseprefix": "openapi-ui",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"id": 311,
|
||||
"mpResourceKind": "MarketplacePanel",
|
||||
"mpResourceName": "marketplacepanels",
|
||||
"namespacePartOfUrl": "{3}",
|
||||
"baseApiGroup": "dashboard.cozystack.io",
|
||||
"baseApiVersion": "v1alpha1",
|
||||
"baseprefix": "openapi-ui",
|
||||
"cluster": "{2}",
|
||||
"id": 311,
|
||||
"marketplaceKind": "MarketplacePanel",
|
||||
"marketplacePlural": "marketplacepanels",
|
||||
"namespace": "{3}",
|
||||
"baseApiGroup": "dashboard.cozystack.io",
|
||||
},
|
||||
"type": "MarketplaceCard",
|
||||
},
|
||||
@@ -834,7 +855,7 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
"prefillValuesRequestIndex": 0,
|
||||
"substractHeight": float64(400),
|
||||
"type": "builtin",
|
||||
"typeName": "secrets",
|
||||
"plural": "secrets",
|
||||
"readOnly": true,
|
||||
},
|
||||
},
|
||||
@@ -1064,13 +1085,13 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "service-port-mapping-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "factory-kube-service-details-port-mapping",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/services/{6}",
|
||||
"pathToItems": ".spec.ports",
|
||||
"withoutControls": true,
|
||||
"id": "service-port-mapping-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"cluster": "{2}",
|
||||
"customizationId": "factory-kube-service-details-port-mapping",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/services/{6}",
|
||||
"pathToItems": ".spec.ports",
|
||||
"withoutControls": true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
@@ -1090,11 +1111,11 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "service-pod-serving-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "factory-kube-service-details-endpointslice",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/discovery.k8s.io/v1/namespaces/{3}/endpointslices",
|
||||
"id": "service-pod-serving-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"cluster": "{2}",
|
||||
"customizationId": "factory-kube-service-details-endpointslice",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/discovery.k8s.io/v1/namespaces/{3}/endpointslices",
|
||||
"labelSelector": map[string]any{
|
||||
"kubernetes.io/service-name": "{reqsJsonPath[0]['.metadata.name']['-']}",
|
||||
},
|
||||
@@ -1124,7 +1145,7 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
"prefillValuesRequestIndex": 0,
|
||||
"substractHeight": float64(400),
|
||||
"type": "builtin",
|
||||
"typeName": "services",
|
||||
"plural": "services",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1147,11 +1168,11 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "pods-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "factory-node-details-/v1/pods",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/pods",
|
||||
"id": "pods-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"cluster": "{2}",
|
||||
"customizationId": "factory-node-details-/v1/pods",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/pods",
|
||||
"labelSelectorFull": map[string]any{
|
||||
"pathToLabels": ".spec.selector",
|
||||
"reqIndex": 0,
|
||||
@@ -1279,13 +1300,13 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "rules-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/networking.k8s.io/v1/namespaces/{3}/ingresses/{6}",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "factory-kube-ingress-details-rules",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"withoutControls": true,
|
||||
"pathToItems": []any{"spec", "rules"},
|
||||
"id": "rules-table",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/networking.k8s.io/v1/namespaces/{3}/ingresses/{6}",
|
||||
"cluster": "{2}",
|
||||
"customizationId": "factory-kube-ingress-details-rules",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"withoutControls": true,
|
||||
"pathToItems": []any{"spec", "rules"},
|
||||
},
|
||||
},
|
||||
}),
|
||||
@@ -1301,8 +1322,10 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
"id": "yaml-editor",
|
||||
"cluster": "{2}",
|
||||
"isNameSpaced": true,
|
||||
"type": "builtin",
|
||||
"typeName": "ingresses",
|
||||
"type": "apis",
|
||||
"apiGroup": "networking.k8s.io",
|
||||
"apiVersion": "v1",
|
||||
"plural": "ingresses",
|
||||
"prefillValuesRequestIndex": float64(0),
|
||||
"substractHeight": float64(400),
|
||||
},
|
||||
@@ -1431,11 +1454,11 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
map[string]any{
|
||||
"type": "EnrichedTable",
|
||||
"data": map[string]any{
|
||||
"id": "workloads-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"clusterNamePartOfUrl": "{2}",
|
||||
"customizationId": "factory-details-v1alpha1.cozystack.io.workloads",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/cozystack.io/v1alpha1/namespaces/{3}/workloads",
|
||||
"id": "workloads-table",
|
||||
"baseprefix": "/openapi-ui",
|
||||
"cluster": "{2}",
|
||||
"customizationId": "factory-details-v1alpha1.cozystack.io.workloads",
|
||||
"fetchUrl": "/api/clusters/{2}/k8s/apis/cozystack.io/v1alpha1/namespaces/{3}/workloads",
|
||||
"labelSelector": map[string]any{
|
||||
"workloads.cozystack.io/monitor": "{reqs[0]['metadata','name']}",
|
||||
},
|
||||
@@ -1456,8 +1479,10 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
"isNameSpaced": true,
|
||||
"prefillValuesRequestIndex": 0,
|
||||
"substractHeight": float64(400),
|
||||
"type": "builtin",
|
||||
"typeName": "workloadmonitors",
|
||||
"type": "apis",
|
||||
"apiGroup": "cozystack.io",
|
||||
"apiVersion": "v1alpha1",
|
||||
"plural": "workloadmonitors",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1556,13 +1581,9 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
antdText("application-ref-label", true, "Application", nil),
|
||||
parsedText("application-ref-value", "{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("spec-storage-ref-block", 4, []any{
|
||||
antdText("storage-ref-label", true, "Storage", nil),
|
||||
parsedText("storage-ref-value", "{reqsJsonPath[0]['.spec.storageRef.kind']['-']}.{reqsJsonPath[0]['.spec.storageRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.storageRef.name']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("spec-strategy-ref-block", 4, []any{
|
||||
antdText("strategy-ref-label", true, "Strategy", nil),
|
||||
parsedText("strategy-ref-value", "{reqsJsonPath[0]['.spec.strategyRef.kind']['-']}.{reqsJsonPath[0]['.spec.strategyRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.strategyRef.name']['-']}", nil),
|
||||
antdFlexVertical("spec-backup-class-name-block", 4, []any{
|
||||
antdText("backup-class-name-label", true, "Backup Class", nil),
|
||||
parsedText("backup-class-name-value", "{reqsJsonPath[0]['.spec.backupClassName']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("spec-schedule-type-block", 4, []any{
|
||||
antdText("schedule-type-label", true, "Schedule Type", nil),
|
||||
@@ -1680,13 +1701,9 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
antdText("application-ref-label", true, "Application", nil),
|
||||
parsedText("application-ref-value", "{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("spec-storage-ref-block", 4, []any{
|
||||
antdText("storage-ref-label", true, "Storage", nil),
|
||||
parsedText("storage-ref-value", "{reqsJsonPath[0]['.spec.storageRef.kind']['-']}.{reqsJsonPath[0]['.spec.storageRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.storageRef.name']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("spec-strategy-ref-block", 4, []any{
|
||||
antdText("strategy-ref-label", true, "Strategy", nil),
|
||||
parsedText("strategy-ref-value", "{reqsJsonPath[0]['.spec.strategyRef.name']['-']}", nil),
|
||||
antdFlexVertical("spec-backup-class-name-block", 4, []any{
|
||||
antdText("backup-class-name-label", true, "Backup Class", nil),
|
||||
parsedText("backup-class-name-value", "{reqsJsonPath[0]['.spec.backupClassName']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("status-backup-ref-block", 4, []any{
|
||||
antdText("backup-ref-label", true, "Backup Ref", nil),
|
||||
@@ -1864,13 +1881,9 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
antdText("application-ref-label", true, "Application", nil),
|
||||
parsedText("application-ref-value", "{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("spec-storage-ref-block", 4, []any{
|
||||
antdText("storage-ref-label", true, "Storage", nil),
|
||||
parsedText("storage-ref-value", "{reqsJsonPath[0]['.spec.storageRef.kind']['-']}.{reqsJsonPath[0]['.spec.storageRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.storageRef.name']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("spec-strategy-ref-block", 4, []any{
|
||||
antdText("strategy-ref-label", true, "Strategy", nil),
|
||||
parsedText("strategy-ref-value", "{reqsJsonPath[0]['.spec.strategyRef.kind']['-']}.{reqsJsonPath[0]['.spec.strategyRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.strategyRef.name']['-']}", nil),
|
||||
antdFlexVertical("spec-backup-class-name-block", 4, []any{
|
||||
antdText("backup-class-name-label", true, "Backup Class", nil),
|
||||
parsedText("backup-class-name-value", "{reqsJsonPath[0]['.spec.backupClassName']['-']}", nil),
|
||||
}),
|
||||
antdFlexVertical("status-artifact-uri-block", 4, []any{
|
||||
antdText("artifact-uri-label", true, "Artifact URI", nil),
|
||||
@@ -1951,12 +1964,27 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
|
||||
|
||||
// CreateAllNavigations creates all navigation resources using helper functions
|
||||
func CreateAllNavigations() []*dashboardv1alpha1.Navigation {
|
||||
// Build baseFactoriesMapping for static (built-in) factories
|
||||
baseFactoriesMapping := map[string]string{
|
||||
// Cluster-scoped builtin resources
|
||||
"base-factory-clusterscoped-builtin-v1-namespaces": "namespace-details",
|
||||
"base-factory-clusterscoped-builtin-v1-nodes": "node-details",
|
||||
// Namespaced builtin resources
|
||||
"base-factory-namespaced-builtin-v1-pods": "pod-details",
|
||||
"base-factory-namespaced-builtin-v1-secrets": "kube-secret-details",
|
||||
"base-factory-namespaced-builtin-v1-services": "kube-service-details",
|
||||
// Namespaced API resources
|
||||
"base-factory-namespaced-api-networking.k8s.io-v1-ingresses": "kube-ingress-details",
|
||||
"base-factory-namespaced-api-cozystack.io-v1alpha1-workloadmonitors": "workloadmonitor-details",
|
||||
}
|
||||
|
||||
return []*dashboardv1alpha1.Navigation{
|
||||
createNavigation("navigation", map[string]any{
|
||||
"namespaces": map[string]any{
|
||||
"change": "/openapi-ui/{selectedCluster}/{value}/factory/marketplace",
|
||||
"clear": "/openapi-ui/{selectedCluster}/api-table/core.cozystack.io/v1alpha1/tenantnamespaces",
|
||||
},
|
||||
"baseFactoriesMapping": baseFactoriesMapping,
|
||||
}),
|
||||
}
|
||||
}
|
||||
@@ -2070,6 +2098,20 @@ func createFormItem(path, label, fieldType string) map[string]any {
|
||||
}
|
||||
}
|
||||
|
||||
// createFormItemWithAPI creates a form item with API endpoint for resource-based selects
|
||||
func createFormItemWithAPI(path, label, fieldType string, apiConfig map[string]any) map[string]any {
|
||||
item := map[string]any{
|
||||
"path": path,
|
||||
"label": label,
|
||||
"type": fieldType,
|
||||
}
|
||||
// Merge API configuration into the form item
|
||||
for key, value := range apiConfig {
|
||||
item[key] = value
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
// ---------------- Workloadmonitor specific functions ----------------
|
||||
|
||||
// createNamespaceHeader creates a header specifically for namespace with correct colors and text
|
||||
@@ -2319,6 +2361,51 @@ func createWorkloadmonitorHeader() map[string]any {
|
||||
}
|
||||
}
|
||||
|
||||
// CreateStaticCFOMapping creates the CFOMapping resource with mappings from
// static CustomFormsOverrides.
//
// Each override's arbitrary spec JSON is expected to carry a string
// "customizationId" of the form "default-<resourcePath>"; the resulting map
// associates <resourcePath> with the full customization ID. Overrides whose
// spec cannot be decoded or that lack a string customizationId are silently
// skipped (best-effort: a bad override must not break the mapping).
func CreateStaticCFOMapping() *dashboardv1alpha1.CFOMapping {
	// Build mappings from static CustomFormsOverrides
	customFormsOverrides := CreateAllCustomFormsOverrides()
	mappings := make(map[string]string, len(customFormsOverrides))
	for _, cfo := range customFormsOverrides {
		var spec map[string]any
		if err := json.Unmarshal(cfo.Spec.JSON.Raw, &spec); err != nil {
			continue // malformed spec JSON: skip this override
		}
		customizationID, ok := spec["customizationId"].(string)
		if !ok {
			continue // no string customizationId: nothing to map
		}
		// Extract the resource path from customizationId (remove "default-" prefix)
		resourcePath := strings.TrimPrefix(customizationID, "default-")
		mappings[resourcePath] = customizationID
	}

	return createCFOMapping("cfomapping", mappings)
}
|
||||
|
||||
// createCFOMapping creates a CFOMapping resource wrapping the given
// resource-path -> customization-ID mappings in an arbitrary JSON spec.
func createCFOMapping(name string, mappings map[string]string) *dashboardv1alpha1.CFOMapping {
	spec := map[string]any{
		"mappings": mappings,
	}
	// Marshal cannot fail for a map of plain strings, so the error is
	// deliberately ignored.
	jsonData, _ := json.Marshal(spec)

	return &dashboardv1alpha1.CFOMapping{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "dashboard.cozystack.io/v1alpha1",
			Kind:       "CFOMapping",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: dashboardv1alpha1.ArbitrarySpec{
			JSON: v1.JSON{
				Raw: jsonData,
			},
		},
	}
}
|
||||
|
||||
// ---------------- Complete resource creation function ----------------
|
||||
|
||||
// CreateAllStaticResources creates all static dashboard resources using helper functions
|
||||
@@ -2355,5 +2442,8 @@ func CreateAllStaticResources() []client.Object {
|
||||
resources = append(resources, tableUriMapping)
|
||||
}
|
||||
|
||||
// Add CFOMapping
|
||||
resources = append(resources, CreateStaticCFOMapping())
|
||||
|
||||
return resources
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ func TestWorkloadReconciler_DeletesOnMissingMonitor(t *testing.T) {
|
||||
Name: "pod-foo",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"workloadmonitor.cozystack.io/name": "missing-monitor",
|
||||
"workloads.cozystack.io/monitor": "missing-monitor",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -89,7 +89,7 @@ func TestWorkloadReconciler_KeepsWhenAllExist(t *testing.T) {
|
||||
Name: "pod-foo",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"workloadmonitor.cozystack.io/name": "mon",
|
||||
"workloads.cozystack.io/monitor": "mon",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
112
internal/crdinstall/install.go
Normal file
112
internal/crdinstall/install.go
Normal file
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package crdinstall
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/cozystack/cozystack/internal/manifestutil"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
|
||||
// Install applies Cozystack CRDs using embedded manifests.
|
||||
// It extracts the manifests and applies them to the cluster using server-side apply,
|
||||
// then waits for all CRDs to have the Established condition.
|
||||
func Install(ctx context.Context, k8sClient client.Client, writeEmbeddedManifests func(string) error) error {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
tmpDir, err := os.MkdirTemp("", "crd-install-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temp directory: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
manifestsDir := filepath.Join(tmpDir, "manifests")
|
||||
if err := os.MkdirAll(manifestsDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create manifests directory: %w", err)
|
||||
}
|
||||
|
||||
if err := writeEmbeddedManifests(manifestsDir); err != nil {
|
||||
return fmt.Errorf("failed to extract embedded manifests: %w", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(manifestsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read manifests directory: %w", err)
|
||||
}
|
||||
|
||||
var manifestFiles []string
|
||||
for _, entry := range entries {
|
||||
if strings.HasSuffix(entry.Name(), ".yaml") {
|
||||
manifestFiles = append(manifestFiles, filepath.Join(manifestsDir, entry.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
if len(manifestFiles) == 0 {
|
||||
return fmt.Errorf("no YAML manifest files found in directory")
|
||||
}
|
||||
|
||||
var objects []*unstructured.Unstructured
|
||||
for _, manifestPath := range manifestFiles {
|
||||
objs, err := manifestutil.ParseManifestFile(manifestPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse manifests from %s: %w", manifestPath, err)
|
||||
}
|
||||
objects = append(objects, objs...)
|
||||
}
|
||||
|
||||
if len(objects) == 0 {
|
||||
return fmt.Errorf("no objects found in manifests")
|
||||
}
|
||||
|
||||
// Validate all objects are CRDs — reject anything else to prevent
|
||||
// accidental force-apply of arbitrary resources.
|
||||
for _, obj := range objects {
|
||||
if obj.GetAPIVersion() != "apiextensions.k8s.io/v1" || obj.GetKind() != "CustomResourceDefinition" {
|
||||
return fmt.Errorf("unexpected object %s %s/%s in CRD manifests, only apiextensions.k8s.io/v1 CustomResourceDefinition is allowed",
|
||||
obj.GetAPIVersion(), obj.GetKind(), obj.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Applying Cozystack CRDs", "count", len(objects))
|
||||
for _, obj := range objects {
|
||||
patchOptions := &client.PatchOptions{
|
||||
FieldManager: "cozystack-operator",
|
||||
Force: func() *bool { b := true; return &b }(),
|
||||
}
|
||||
|
||||
if err := k8sClient.Patch(ctx, obj, client.Apply, patchOptions); err != nil {
|
||||
return fmt.Errorf("failed to apply CRD %s: %w", obj.GetName(), err)
|
||||
}
|
||||
logger.Info("Applied CRD", "name", obj.GetName())
|
||||
}
|
||||
|
||||
crdNames := manifestutil.CollectCRDNames(objects)
|
||||
if err := manifestutil.WaitForCRDsEstablished(ctx, k8sClient, crdNames); err != nil {
|
||||
return fmt.Errorf("CRDs not established after apply: %w", err)
|
||||
}
|
||||
|
||||
logger.Info("CRD installation completed successfully")
|
||||
return nil
|
||||
}
|
||||
302
internal/crdinstall/install_test.go
Normal file
302
internal/crdinstall/install_test.go
Normal file
@@ -0,0 +1,302 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package crdinstall
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
)
|
||||
|
||||
func TestWriteEmbeddedManifests(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
if err := WriteEmbeddedManifests(tmpDir); err != nil {
|
||||
t.Fatalf("WriteEmbeddedManifests() error = %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read output dir: %v", err)
|
||||
}
|
||||
|
||||
var yamlFiles []string
|
||||
for _, e := range entries {
|
||||
if strings.HasSuffix(e.Name(), ".yaml") {
|
||||
yamlFiles = append(yamlFiles, e.Name())
|
||||
}
|
||||
}
|
||||
|
||||
if len(yamlFiles) == 0 {
|
||||
t.Error("WriteEmbeddedManifests() produced no YAML files")
|
||||
}
|
||||
|
||||
expectedFiles := []string{
|
||||
"cozystack.io_packages.yaml",
|
||||
"cozystack.io_packagesources.yaml",
|
||||
}
|
||||
for _, expected := range expectedFiles {
|
||||
found := false
|
||||
for _, actual := range yamlFiles {
|
||||
if actual == expected {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("expected file %q not found in output, got %v", expected, yamlFiles)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify files are non-empty
|
||||
for _, f := range yamlFiles {
|
||||
data, err := os.ReadFile(filepath.Join(tmpDir, f))
|
||||
if err != nil {
|
||||
t.Errorf("failed to read %s: %v", f, err)
|
||||
continue
|
||||
}
|
||||
if len(data) == 0 {
|
||||
t.Errorf("file %s is empty", f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteEmbeddedManifests_filePermissions(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
if err := WriteEmbeddedManifests(tmpDir); err != nil {
|
||||
t.Fatalf("WriteEmbeddedManifests() error = %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read output dir: %v", err)
|
||||
}
|
||||
|
||||
for _, e := range entries {
|
||||
if !strings.HasSuffix(e.Name(), ".yaml") {
|
||||
continue
|
||||
}
|
||||
info, err := e.Info()
|
||||
if err != nil {
|
||||
t.Errorf("failed to get info for %s: %v", e.Name(), err)
|
||||
continue
|
||||
}
|
||||
perm := info.Mode().Perm()
|
||||
if perm&0o077 != 0 {
|
||||
t.Errorf("file %s has overly permissive mode %o, expected no group/other access", e.Name(), perm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newCRDManifestWriter returns a function that writes test CRD YAML files.
|
||||
func newCRDManifestWriter(crds ...string) func(string) error {
|
||||
return func(dir string) error {
|
||||
for i, crd := range crds {
|
||||
filename := filepath.Join(dir, fmt.Sprintf("crd%d.yaml", i+1))
|
||||
if err := os.WriteFile(filename, []byte(crd), 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testCRD1 is a minimal valid v1 CRD manifest (packages.cozystack.io) used as
// installer input in tests.
var testCRD1 = `apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: packages.cozystack.io
spec:
  group: cozystack.io
  names:
    kind: Package
    plural: packages
  scope: Namespaced
  versions:
    - name: v1alpha1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
`

// testCRD2 is a second minimal v1 CRD manifest (packagesources.cozystack.io)
// for multi-manifest test cases.
var testCRD2 = `apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: packagesources.cozystack.io
spec:
  group: cozystack.io
  names:
    kind: PackageSource
    plural: packagesources
  scope: Namespaced
  versions:
    - name: v1alpha1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
`
|
||||
|
||||
// establishedInterceptor simulates CRDs becoming Established in the API server.
// It wraps Get so that any fetched CustomResourceDefinition (read as
// unstructured) comes back with status.conditions containing Established=True,
// letting the polling in WaitForCRDsEstablished succeed against a fake client.
func establishedInterceptor() interceptor.Funcs {
	return interceptor.Funcs{
		Get: func(ctx context.Context, c client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
			if err := c.Get(ctx, key, obj, opts...); err != nil {
				return err
			}
			u, ok := obj.(*unstructured.Unstructured)
			if !ok {
				// Typed objects pass through unchanged.
				return nil
			}
			if u.GetKind() == "CustomResourceDefinition" {
				// Error deliberately ignored: the nested path is fixed and valid.
				_ = unstructured.SetNestedSlice(u.Object, []interface{}{
					map[string]interface{}{
						"type":   "Established",
						"status": "True",
					},
				}, "status", "conditions")
			}
			return nil
		},
	}
}
|
||||
|
||||
// TestInstall_appliesAllCRDs verifies the happy path: both test CRDs are
// applied and Install returns nil once they report Established (simulated
// by establishedInterceptor).
func TestInstall_appliesAllCRDs(t *testing.T) {
	log.SetLogger(zap.New(zap.UseDevMode(true)))

	scheme := runtime.NewScheme()
	if err := apiextensionsv1.AddToScheme(scheme); err != nil {
		t.Fatalf("failed to add apiextensions to scheme: %v", err)
	}

	fakeClient := fake.NewClientBuilder().
		WithScheme(scheme).
		WithInterceptorFuncs(establishedInterceptor()).
		Build()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// NOTE(review): this stores the logger of an empty context (the default
	// logger) into ctx — it appears to be a no-op; confirm intent.
	ctx = log.IntoContext(ctx, log.FromContext(context.Background()))

	err := Install(ctx, fakeClient, newCRDManifestWriter(testCRD1, testCRD2))
	if err != nil {
		t.Fatalf("Install() error = %v", err)
	}
}
|
||||
|
||||
func TestInstall_noManifests(t *testing.T) {
|
||||
log.SetLogger(zap.New(zap.UseDevMode(true)))
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
ctx = log.IntoContext(ctx, log.FromContext(context.Background()))
|
||||
|
||||
err := Install(ctx, fakeClient, func(string) error { return nil })
|
||||
if err == nil {
|
||||
t.Error("Install() expected error for empty manifests, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "no YAML manifest files found") {
|
||||
t.Errorf("Install() error = %v, want error containing 'no YAML manifest files found'", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestInstall_writeManifestsFails verifies that an error from the manifest
// writer is propagated by Install.
func TestInstall_writeManifestsFails(t *testing.T) {
	log.SetLogger(zap.New(zap.UseDevMode(true)))

	scheme := runtime.NewScheme()
	fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ctx = log.IntoContext(ctx, log.FromContext(context.Background()))

	err := Install(ctx, fakeClient, func(string) error { return os.ErrPermission })
	if err == nil {
		t.Error("Install() expected error when writeManifests fails, got nil")
	}
}
|
||||
|
||||
// TestInstall_rejectsNonCRDObjects verifies that Install refuses manifests
// containing objects other than apiextensions.k8s.io/v1
// CustomResourceDefinitions (the validation guard against force-applying
// arbitrary resources).
func TestInstall_rejectsNonCRDObjects(t *testing.T) {
	log.SetLogger(zap.New(zap.UseDevMode(true)))

	scheme := runtime.NewScheme()
	if err := apiextensionsv1.AddToScheme(scheme); err != nil {
		t.Fatalf("failed to add apiextensions to scheme: %v", err)
	}

	fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()

	nonCRD := `apiVersion: v1
kind: Namespace
metadata:
  name: should-not-be-applied
`
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ctx = log.IntoContext(ctx, log.FromContext(context.Background()))

	err := Install(ctx, fakeClient, newCRDManifestWriter(nonCRD))
	if err == nil {
		t.Fatal("Install() expected error for non-CRD object, got nil")
	}
	if !strings.Contains(err.Error(), "unexpected object") {
		t.Errorf("Install() error = %v, want error containing 'unexpected object'", err)
	}
}
|
||||
|
||||
// TestInstall_crdNotEstablished verifies that Install fails when applied
// CRDs never reach the Established condition (no interceptor sets it, so
// the context timeout aborts the wait).
func TestInstall_crdNotEstablished(t *testing.T) {
	log.SetLogger(zap.New(zap.UseDevMode(true)))

	scheme := runtime.NewScheme()
	if err := apiextensionsv1.AddToScheme(scheme); err != nil {
		t.Fatalf("failed to add apiextensions to scheme: %v", err)
	}

	// No interceptor: CRDs will never get Established condition
	fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	ctx = log.IntoContext(ctx, log.FromContext(context.Background()))

	err := Install(ctx, fakeClient, newCRDManifestWriter(testCRD1))
	if err == nil {
		t.Fatal("Install() expected error when CRDs never become established, got nil")
	}
	if !strings.Contains(err.Error(), "CRDs not established") {
		t.Errorf("Install() error = %v, want error containing 'CRDs not established'", err)
	}
}
|
||||
51
internal/crdinstall/manifests.embed.go
Normal file
51
internal/crdinstall/manifests.embed.go
Normal file
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package crdinstall
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
//go:embed manifests/*.yaml
|
||||
var embeddedCRDManifests embed.FS
|
||||
|
||||
// WriteEmbeddedManifests extracts embedded CRD manifests to a directory.
|
||||
func WriteEmbeddedManifests(dir string) error {
|
||||
manifests, err := fs.ReadDir(embeddedCRDManifests, "manifests")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read embedded manifests: %w", err)
|
||||
}
|
||||
|
||||
for _, manifest := range manifests {
|
||||
data, err := fs.ReadFile(embeddedCRDManifests, path.Join("manifests", manifest.Name()))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read file %s: %w", manifest.Name(), err)
|
||||
}
|
||||
|
||||
outputPath := filepath.Join(dir, manifest.Name())
|
||||
if err := os.WriteFile(outputPath, data, 0600); err != nil {
|
||||
return fmt.Errorf("failed to write file %s: %w", outputPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -17,18 +17,15 @@ limitations under the License.
|
||||
package fluxinstall
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cozystack/cozystack/internal/manifestutil"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
@@ -76,7 +73,7 @@ func Install(ctx context.Context, k8sClient client.Client, writeEmbeddedManifest
|
||||
// Parse all manifest files
|
||||
var objects []*unstructured.Unstructured
|
||||
for _, manifestPath := range manifestFiles {
|
||||
objs, err := parseManifests(manifestPath)
|
||||
objs, err := manifestutil.ParseManifestFile(manifestPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse manifests from %s: %w", manifestPath, err)
|
||||
}
|
||||
@@ -110,56 +107,6 @@ func Install(ctx context.Context, k8sClient client.Client, writeEmbeddedManifest
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseManifests parses YAML manifests into unstructured objects.
|
||||
func parseManifests(manifestPath string) ([]*unstructured.Unstructured, error) {
|
||||
data, err := os.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read manifest file: %w", err)
|
||||
}
|
||||
|
||||
return readYAMLObjects(bytes.NewReader(data))
|
||||
}
|
||||
|
||||
// readYAMLObjects parses multi-document YAML into unstructured objects.
|
||||
func readYAMLObjects(reader io.Reader) ([]*unstructured.Unstructured, error) {
|
||||
var objects []*unstructured.Unstructured
|
||||
yamlReader := k8syaml.NewYAMLReader(bufio.NewReader(reader))
|
||||
|
||||
for {
|
||||
doc, err := yamlReader.Read()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read YAML document: %w", err)
|
||||
}
|
||||
|
||||
// Skip empty documents
|
||||
if len(bytes.TrimSpace(doc)) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
obj := &unstructured.Unstructured{}
|
||||
decoder := k8syaml.NewYAMLOrJSONDecoder(bytes.NewReader(doc), len(doc))
|
||||
if err := decoder.Decode(obj); err != nil {
|
||||
// Skip documents that can't be decoded (might be comments or empty)
|
||||
if err == io.EOF {
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("failed to decode YAML document: %w", err)
|
||||
}
|
||||
|
||||
// Skip empty objects (no kind)
|
||||
if obj.GetKind() == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
objects = append(objects, obj)
|
||||
}
|
||||
|
||||
return objects, nil
|
||||
}
|
||||
|
||||
// applyManifests applies Kubernetes objects using server-side apply.
|
||||
func applyManifests(ctx context.Context, k8sClient client.Client, objects []*unstructured.Unstructured) error {
|
||||
logger := log.FromContext(ctx)
|
||||
@@ -183,8 +130,11 @@ func applyManifests(ctx context.Context, k8sClient client.Client, objects []*uns
|
||||
return fmt.Errorf("failed to apply cluster definitions: %w", err)
|
||||
}
|
||||
|
||||
// Wait a bit for CRDs to be registered
|
||||
time.Sleep(2 * time.Second)
|
||||
// Wait for CRDs to be established before applying dependent resources
|
||||
crdNames := manifestutil.CollectCRDNames(stageOne)
|
||||
if err := manifestutil.WaitForCRDsEstablished(ctx, k8sClient, crdNames); err != nil {
|
||||
return fmt.Errorf("CRDs not established after apply: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply stage two (everything else)
|
||||
@@ -215,7 +165,6 @@ func applyObjects(ctx context.Context, k8sClient client.Client, objects []*unstr
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
// extractNamespace extracts the namespace name from the Namespace object in the manifests.
|
||||
func extractNamespace(objects []*unstructured.Unstructured) (string, error) {
|
||||
for _, obj := range objects {
|
||||
@@ -386,4 +335,3 @@ func setEnvVar(env []interface{}, name, value string) []interface{} {
|
||||
|
||||
return env
|
||||
}
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
//go:embed manifests/*.yaml
|
||||
@@ -40,8 +41,8 @@ func WriteEmbeddedManifests(dir string) error {
|
||||
return fmt.Errorf("failed to read file %s: %w", manifest.Name(), err)
|
||||
}
|
||||
|
||||
outputPath := path.Join(dir, manifest.Name())
|
||||
if err := os.WriteFile(outputPath, data, 0666); err != nil {
|
||||
outputPath := filepath.Join(dir, manifest.Name())
|
||||
if err := os.WriteFile(outputPath, data, 0600); err != nil {
|
||||
return fmt.Errorf("failed to write file %s: %w", outputPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
118
internal/manifestutil/crd.go
Normal file
118
internal/manifestutil/crd.go
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package manifestutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
|
||||
var crdGVK = schema.GroupVersionKind{
|
||||
Group: "apiextensions.k8s.io",
|
||||
Version: "v1",
|
||||
Kind: "CustomResourceDefinition",
|
||||
}
|
||||
|
||||
// WaitForCRDsEstablished polls the API server until all named CRDs have the
|
||||
// Established condition set to True, or the context is cancelled.
|
||||
func WaitForCRDsEstablished(ctx context.Context, k8sClient client.Client, crdNames []string) error {
|
||||
if len(crdNames) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
logger := log.FromContext(ctx)
|
||||
ticker := time.NewTicker(500 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("context cancelled while waiting for CRDs to be established: %w", ctx.Err())
|
||||
default:
|
||||
}
|
||||
|
||||
allEstablished := true
|
||||
var pendingCRD string
|
||||
for _, name := range crdNames {
|
||||
crd := &unstructured.Unstructured{}
|
||||
crd.SetGroupVersionKind(crdGVK)
|
||||
if err := k8sClient.Get(ctx, types.NamespacedName{Name: name}, crd); err != nil {
|
||||
allEstablished = false
|
||||
pendingCRD = name
|
||||
break
|
||||
}
|
||||
|
||||
conditions, found, err := unstructured.NestedSlice(crd.Object, "status", "conditions")
|
||||
if err != nil || !found {
|
||||
allEstablished = false
|
||||
pendingCRD = name
|
||||
break
|
||||
}
|
||||
|
||||
established := false
|
||||
for _, c := range conditions {
|
||||
cond, ok := c.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if cond["type"] == "Established" && cond["status"] == "True" {
|
||||
established = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !established {
|
||||
allEstablished = false
|
||||
pendingCRD = name
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if allEstablished {
|
||||
logger.Info("All CRDs established", "count", len(crdNames))
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.V(1).Info("Waiting for CRD to be established", "crd", pendingCRD)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("context cancelled while waiting for CRD %q to be established: %w", pendingCRD, ctx.Err())
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CollectCRDNames returns the names of all CustomResourceDefinition objects
|
||||
// from the given list of unstructured objects. Only objects with
|
||||
// apiVersion "apiextensions.k8s.io/v1" and kind "CustomResourceDefinition"
|
||||
// are matched.
|
||||
func CollectCRDNames(objects []*unstructured.Unstructured) []string {
|
||||
var names []string
|
||||
for _, obj := range objects {
|
||||
if obj.GetAPIVersion() == "apiextensions.k8s.io/v1" && obj.GetKind() == "CustomResourceDefinition" {
|
||||
names = append(names, obj.GetName())
|
||||
}
|
||||
}
|
||||
return names
|
||||
}
|
||||
202
internal/manifestutil/crd_test.go
Normal file
202
internal/manifestutil/crd_test.go
Normal file
@@ -0,0 +1,202 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package manifestutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
)
|
||||
|
||||
// TestCollectCRDNames verifies that only v1 CustomResourceDefinition objects
// are collected, in input order, with non-CRD kinds skipped.
func TestCollectCRDNames(t *testing.T) {
	objects := []*unstructured.Unstructured{
		{Object: map[string]interface{}{
			"apiVersion": "v1",
			"kind":       "Namespace",
			"metadata":   map[string]interface{}{"name": "test-ns"},
		}},
		{Object: map[string]interface{}{
			"apiVersion": "apiextensions.k8s.io/v1",
			"kind":       "CustomResourceDefinition",
			"metadata":   map[string]interface{}{"name": "packages.cozystack.io"},
		}},
		{Object: map[string]interface{}{
			"apiVersion": "apps/v1",
			"kind":       "Deployment",
			"metadata":   map[string]interface{}{"name": "test-deploy"},
		}},
		{Object: map[string]interface{}{
			"apiVersion": "apiextensions.k8s.io/v1",
			"kind":       "CustomResourceDefinition",
			"metadata":   map[string]interface{}{"name": "packagesources.cozystack.io"},
		}},
	}

	names := CollectCRDNames(objects)
	if len(names) != 2 {
		t.Fatalf("CollectCRDNames() returned %d names, want 2", len(names))
	}
	if names[0] != "packages.cozystack.io" {
		t.Errorf("names[0] = %q, want %q", names[0], "packages.cozystack.io")
	}
	if names[1] != "packagesources.cozystack.io" {
		t.Errorf("names[1] = %q, want %q", names[1], "packagesources.cozystack.io")
	}
}
|
||||
|
||||
// TestCollectCRDNames_ignoresWrongAPIVersion verifies that a CRD with a
// legacy (v1beta1) apiVersion is not collected — only
// apiextensions.k8s.io/v1 matches.
func TestCollectCRDNames_ignoresWrongAPIVersion(t *testing.T) {
	objects := []*unstructured.Unstructured{
		{Object: map[string]interface{}{
			"apiVersion": "apiextensions.k8s.io/v1",
			"kind":       "CustomResourceDefinition",
			"metadata":   map[string]interface{}{"name": "real.crd.io"},
		}},
		{Object: map[string]interface{}{
			"apiVersion": "apiextensions.k8s.io/v1beta1",
			"kind":       "CustomResourceDefinition",
			"metadata":   map[string]interface{}{"name": "legacy.crd.io"},
		}},
	}

	names := CollectCRDNames(objects)
	if len(names) != 1 {
		t.Fatalf("CollectCRDNames() returned %d names, want 1", len(names))
	}
	if names[0] != "real.crd.io" {
		t.Errorf("names[0] = %q, want %q", names[0], "real.crd.io")
	}
}
|
||||
|
||||
// TestCollectCRDNames_noCRDs verifies the empty result when the input
// contains no CRD objects at all.
func TestCollectCRDNames_noCRDs(t *testing.T) {
	objects := []*unstructured.Unstructured{
		{Object: map[string]interface{}{
			"apiVersion": "v1",
			"kind":       "Namespace",
			"metadata":   map[string]interface{}{"name": "test"},
		}},
	}

	names := CollectCRDNames(objects)
	if len(names) != 0 {
		t.Errorf("CollectCRDNames() returned %d names, want 0", len(names))
	}
}
|
||||
|
||||
func TestWaitForCRDsEstablished_success(t *testing.T) {
|
||||
log.SetLogger(zap.New(zap.UseDevMode(true)))
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
if err := apiextensionsv1.AddToScheme(scheme); err != nil {
|
||||
t.Fatalf("failed to add apiextensions to scheme: %v", err)
|
||||
}
|
||||
|
||||
// Create a CRD object in the fake client
|
||||
crd := &unstructured.Unstructured{Object: map[string]interface{}{
|
||||
"apiVersion": "apiextensions.k8s.io/v1",
|
||||
"kind": "CustomResourceDefinition",
|
||||
"metadata": map[string]interface{}{"name": "packages.cozystack.io"},
|
||||
}}
|
||||
|
||||
fakeClient := fake.NewClientBuilder().
|
||||
WithScheme(scheme).
|
||||
WithObjects(crd).
|
||||
WithInterceptorFuncs(interceptor.Funcs{
|
||||
Get: func(ctx context.Context, c client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
|
||||
if err := c.Get(ctx, key, obj, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
u, ok := obj.(*unstructured.Unstructured)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if u.GetKind() == "CustomResourceDefinition" {
|
||||
_ = unstructured.SetNestedSlice(u.Object, []interface{}{
|
||||
map[string]interface{}{
|
||||
"type": "Established",
|
||||
"status": "True",
|
||||
},
|
||||
}, "status", "conditions")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}).
|
||||
Build()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
ctx = log.IntoContext(ctx, log.FromContext(context.Background()))
|
||||
|
||||
err := WaitForCRDsEstablished(ctx, fakeClient, []string{"packages.cozystack.io"})
|
||||
if err != nil {
|
||||
t.Fatalf("WaitForCRDsEstablished() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWaitForCRDsEstablished_timeout(t *testing.T) {
|
||||
log.SetLogger(zap.New(zap.UseDevMode(true)))
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
if err := apiextensionsv1.AddToScheme(scheme); err != nil {
|
||||
t.Fatalf("failed to add apiextensions to scheme: %v", err)
|
||||
}
|
||||
|
||||
// CRD exists but never gets Established condition
|
||||
crd := &unstructured.Unstructured{Object: map[string]interface{}{
|
||||
"apiVersion": "apiextensions.k8s.io/v1",
|
||||
"kind": "CustomResourceDefinition",
|
||||
"metadata": map[string]interface{}{"name": "packages.cozystack.io"},
|
||||
}}
|
||||
|
||||
fakeClient := fake.NewClientBuilder().
|
||||
WithScheme(scheme).
|
||||
WithObjects(crd).
|
||||
Build()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
ctx = log.IntoContext(ctx, log.FromContext(context.Background()))
|
||||
|
||||
err := WaitForCRDsEstablished(ctx, fakeClient, []string{"packages.cozystack.io"})
|
||||
if err == nil {
|
||||
t.Fatal("WaitForCRDsEstablished() expected error on timeout, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "packages.cozystack.io") {
|
||||
t.Errorf("error should mention stuck CRD name, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWaitForCRDsEstablished_empty(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
|
||||
|
||||
ctx := context.Background()
|
||||
err := WaitForCRDsEstablished(ctx, fakeClient, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("WaitForCRDsEstablished() with empty names should return nil, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
76
internal/manifestutil/parse.go
Normal file
76
internal/manifestutil/parse.go
Normal file
@@ -0,0 +1,76 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package manifestutil
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
|
||||
)
|
||||
|
||||
// ParseManifestFile reads a YAML file and parses it into unstructured objects.
|
||||
func ParseManifestFile(manifestPath string) ([]*unstructured.Unstructured, error) {
|
||||
data, err := os.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read manifest file: %w", err)
|
||||
}
|
||||
|
||||
return ReadYAMLObjects(bytes.NewReader(data))
|
||||
}
|
||||
|
||||
// ReadYAMLObjects parses multi-document YAML from a reader into unstructured objects.
|
||||
// Empty documents and documents without a kind are skipped.
|
||||
func ReadYAMLObjects(reader io.Reader) ([]*unstructured.Unstructured, error) {
|
||||
var objects []*unstructured.Unstructured
|
||||
yamlReader := k8syaml.NewYAMLReader(bufio.NewReader(reader))
|
||||
|
||||
for {
|
||||
doc, err := yamlReader.Read()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read YAML document: %w", err)
|
||||
}
|
||||
|
||||
if len(bytes.TrimSpace(doc)) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
obj := &unstructured.Unstructured{}
|
||||
decoder := k8syaml.NewYAMLOrJSONDecoder(bytes.NewReader(doc), len(doc))
|
||||
if err := decoder.Decode(obj); err != nil {
|
||||
if err == io.EOF {
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("failed to decode YAML document: %w", err)
|
||||
}
|
||||
|
||||
if obj.GetKind() == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
objects = append(objects, obj)
|
||||
}
|
||||
|
||||
return objects, nil
|
||||
}
|
||||
161
internal/manifestutil/parse_test.go
Normal file
161
internal/manifestutil/parse_test.go
Normal file
@@ -0,0 +1,161 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package manifestutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadYAMLObjects(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantCount int
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "single document",
|
||||
input: `apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: test
|
||||
`,
|
||||
wantCount: 1,
|
||||
},
|
||||
{
|
||||
name: "multiple documents",
|
||||
input: `apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: test1
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: test2
|
||||
`,
|
||||
wantCount: 2,
|
||||
},
|
||||
{
|
||||
name: "empty input",
|
||||
input: "",
|
||||
wantCount: 0,
|
||||
},
|
||||
{
|
||||
name: "decoder rejects document without kind",
|
||||
input: `apiVersion: v1
|
||||
metadata:
|
||||
name: test
|
||||
`,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "whitespace-only document between separators is skipped",
|
||||
input: `apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: test1
|
||||
---
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: test2
|
||||
`,
|
||||
wantCount: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
objects, err := ReadYAMLObjects(strings.NewReader(tt.input))
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ReadYAMLObjects() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if len(objects) != tt.wantCount {
|
||||
t.Errorf("ReadYAMLObjects() returned %d objects, want %d", len(objects), tt.wantCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadYAMLObjects_preservesFields(t *testing.T) {
|
||||
input := `apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: packages.cozystack.io
|
||||
spec:
|
||||
group: cozystack.io
|
||||
`
|
||||
objects, err := ReadYAMLObjects(strings.NewReader(input))
|
||||
if err != nil {
|
||||
t.Fatalf("ReadYAMLObjects() error = %v", err)
|
||||
}
|
||||
if len(objects) != 1 {
|
||||
t.Fatalf("expected 1 object, got %d", len(objects))
|
||||
}
|
||||
|
||||
obj := objects[0]
|
||||
if obj.GetKind() != "CustomResourceDefinition" {
|
||||
t.Errorf("kind = %q, want %q", obj.GetKind(), "CustomResourceDefinition")
|
||||
}
|
||||
if obj.GetName() != "packages.cozystack.io" {
|
||||
t.Errorf("name = %q, want %q", obj.GetName(), "packages.cozystack.io")
|
||||
}
|
||||
if obj.GetAPIVersion() != "apiextensions.k8s.io/v1" {
|
||||
t.Errorf("apiVersion = %q, want %q", obj.GetAPIVersion(), "apiextensions.k8s.io/v1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseManifestFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
manifestPath := filepath.Join(tmpDir, "test.yaml")
|
||||
|
||||
content := `apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm1
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cm2
|
||||
`
|
||||
if err := os.WriteFile(manifestPath, []byte(content), 0600); err != nil {
|
||||
t.Fatalf("failed to write test manifest: %v", err)
|
||||
}
|
||||
|
||||
objects, err := ParseManifestFile(manifestPath)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseManifestFile() error = %v", err)
|
||||
}
|
||||
if len(objects) != 2 {
|
||||
t.Errorf("ParseManifestFile() returned %d objects, want 2", len(objects))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseManifestFile_notFound(t *testing.T) {
|
||||
_, err := ParseManifestFile("/nonexistent/path/test.yaml")
|
||||
if err == nil {
|
||||
t.Error("ParseManifestFile() expected error for nonexistent file, got nil")
|
||||
}
|
||||
}
|
||||
@@ -211,13 +211,13 @@ func (r *PackageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
|
||||
Namespace: "cozy-system",
|
||||
},
|
||||
Install: &helmv2.Install{
|
||||
Timeout: &metav1.Duration{Duration: 10 * 60 * 1000000000}, // 10m
|
||||
Timeout: &metav1.Duration{Duration: 10 * 60 * 1000000000}, // 10m
|
||||
Remediation: &helmv2.InstallRemediation{
|
||||
Retries: -1,
|
||||
},
|
||||
},
|
||||
Upgrade: &helmv2.Upgrade{
|
||||
Timeout: &metav1.Duration{Duration: 10 * 60 * 1000000000}, // 10m
|
||||
Timeout: &metav1.Duration{Duration: 10 * 60 * 1000000000}, // 10m
|
||||
Remediation: &helmv2.UpgradeRemediation{
|
||||
Retries: -1,
|
||||
},
|
||||
@@ -387,6 +387,7 @@ func (r *PackageReconciler) createOrUpdateHelmRelease(ctx context.Context, hr *h
|
||||
}
|
||||
hr.SetAnnotations(annotations)
|
||||
|
||||
hr.Spec.Suspend = existing.Spec.Suspend
|
||||
// Update Spec
|
||||
existing.Spec = hr.Spec
|
||||
existing.SetLabels(hr.GetLabels())
|
||||
@@ -735,53 +736,39 @@ func (r *PackageReconciler) updateDependentPackagesDependencies(ctx context.Cont
|
||||
return nil
|
||||
}
|
||||
|
||||
// reconcileNamespaces creates or updates namespaces based on components in the variant
|
||||
// reconcileNamespaces creates or updates namespaces based on components in the variant.
|
||||
// For each namespace, it checks ALL Packages sharing that namespace to determine whether
|
||||
// the namespace should be privileged — it is privileged if ANY Package has a privileged
|
||||
// component installed in it.
|
||||
func (r *PackageReconciler) reconcileNamespaces(ctx context.Context, pkg *cozyv1alpha1.Package, variant *cozyv1alpha1.Variant) error {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
// Collect namespaces from components
|
||||
// Map: namespace -> {isPrivileged}
|
||||
type namespaceInfo struct {
|
||||
privileged bool
|
||||
}
|
||||
namespacesMap := make(map[string]namespaceInfo)
|
||||
|
||||
// Collect namespaces from this Package's components
|
||||
targetNamespaces := make(map[string]struct{})
|
||||
for _, component := range variant.Components {
|
||||
// Skip components without Install section
|
||||
if component.Install == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if component is disabled via Package spec
|
||||
if pkgComponent, ok := pkg.Spec.Components[component.Name]; ok {
|
||||
if pkgComponent.Enabled != nil && !*pkgComponent.Enabled {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Namespace must be set
|
||||
namespace := component.Install.Namespace
|
||||
if namespace == "" {
|
||||
return fmt.Errorf("component %s has empty namespace in Install section", component.Name)
|
||||
}
|
||||
targetNamespaces[namespace] = struct{}{}
|
||||
}
|
||||
|
||||
info, exists := namespacesMap[namespace]
|
||||
if !exists {
|
||||
info = namespaceInfo{
|
||||
privileged: false,
|
||||
}
|
||||
}
|
||||
|
||||
// If component is privileged, mark namespace as privileged
|
||||
if component.Install.Privileged {
|
||||
info.privileged = true
|
||||
}
|
||||
|
||||
namespacesMap[namespace] = info
|
||||
// Determine which namespaces should be privileged by checking ALL Packages
|
||||
privileged, err := r.resolvePrivilegedNamespaces(ctx, targetNamespaces)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve privileged namespaces: %w", err)
|
||||
}
|
||||
|
||||
// Create or update all namespaces
|
||||
for nsName, info := range namespacesMap {
|
||||
for nsName := range targetNamespaces {
|
||||
namespace := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nsName,
|
||||
@@ -792,36 +779,89 @@ func (r *PackageReconciler) reconcileNamespaces(ctx context.Context, pkg *cozyv1
|
||||
},
|
||||
}
|
||||
|
||||
// Add system label only for non-tenant namespaces
|
||||
if !strings.HasPrefix(nsName, "tenant-") {
|
||||
namespace.Labels["cozystack.io/system"] = "true"
|
||||
}
|
||||
|
||||
// Add privileged label if needed
|
||||
if info.privileged {
|
||||
if privileged[nsName] {
|
||||
namespace.Labels["pod-security.kubernetes.io/enforce"] = "privileged"
|
||||
}
|
||||
|
||||
if err := r.createOrUpdateNamespace(ctx, namespace); err != nil {
|
||||
logger.Error(err, "failed to reconcile namespace", "name", nsName, "privileged", info.privileged)
|
||||
logger.Error(err, "failed to reconcile namespace", "name", nsName, "privileged", privileged[nsName])
|
||||
return fmt.Errorf("failed to reconcile namespace %s: %w", nsName, err)
|
||||
}
|
||||
logger.Info("reconciled namespace", "name", nsName, "privileged", info.privileged)
|
||||
logger.Info("reconciled namespace", "name", nsName, "privileged", privileged[nsName])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createOrUpdateNamespace creates or updates a namespace using server-side apply
|
||||
// resolvePrivilegedNamespaces checks all PackageSources and their corresponding Packages
|
||||
// to determine which of the given namespaces require the privileged PodSecurity level.
|
||||
// A namespace is privileged if ANY active Package has a component with privileged: true in it.
|
||||
func (r *PackageReconciler) resolvePrivilegedNamespaces(ctx context.Context, namespaces map[string]struct{}) (map[string]bool, error) {
|
||||
result := make(map[string]bool)
|
||||
|
||||
packageSources := &cozyv1alpha1.PackageSourceList{}
|
||||
if err := r.List(ctx, packageSources); err != nil {
|
||||
return nil, fmt.Errorf("failed to list PackageSources: %w", err)
|
||||
}
|
||||
|
||||
for i := range packageSources.Items {
|
||||
ps := &packageSources.Items[i]
|
||||
|
||||
// Check if a Package exists for this PackageSource
|
||||
pkg := &cozyv1alpha1.Package{}
|
||||
if err := r.Get(ctx, types.NamespacedName{Name: ps.Name}, pkg); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get Package %s: %w", ps.Name, err)
|
||||
}
|
||||
|
||||
// Resolve active variant
|
||||
variantName := pkg.Spec.Variant
|
||||
if variantName == "" {
|
||||
variantName = "default"
|
||||
}
|
||||
|
||||
var variant *cozyv1alpha1.Variant
|
||||
for j := range ps.Spec.Variants {
|
||||
if ps.Spec.Variants[j].Name == variantName {
|
||||
variant = &ps.Spec.Variants[j]
|
||||
break
|
||||
}
|
||||
}
|
||||
if variant == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, component := range variant.Components {
|
||||
if component.Install == nil {
|
||||
continue
|
||||
}
|
||||
if pkgComponent, ok := pkg.Spec.Components[component.Name]; ok {
|
||||
if pkgComponent.Enabled != nil && !*pkgComponent.Enabled {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if _, relevant := namespaces[component.Install.Namespace]; !relevant {
|
||||
continue
|
||||
}
|
||||
if component.Install.Privileged {
|
||||
result[component.Install.Namespace] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// createOrUpdateNamespace creates or updates a namespace using server-side apply.
|
||||
func (r *PackageReconciler) createOrUpdateNamespace(ctx context.Context, namespace *corev1.Namespace) error {
|
||||
// Ensure TypeMeta is set for server-side apply
|
||||
namespace.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Namespace"))
|
||||
|
||||
// Use server-side apply with field manager
|
||||
// This is atomic and avoids race conditions from Get/Create/Update pattern
|
||||
// Labels and annotations will be merged automatically by the server
|
||||
// Each label/annotation key is treated as a separate field, so existing ones are preserved
|
||||
return r.Patch(ctx, namespace, client.Apply, client.FieldOwner("cozystack-package-controller"))
|
||||
return r.Patch(ctx, namespace, client.Apply, client.FieldOwner("cozystack-package-controller"), client.ForceOwnership)
|
||||
}
|
||||
|
||||
// cleanupOrphanedHelmReleases removes HelmReleases that are no longer needed
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
include ../../../hack/package.mk
|
||||
|
||||
generate:
|
||||
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
|
||||
../../../hack/update-crd.sh
|
||||
|
||||
update:
|
||||
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/FerretDB/FerretDB | awk -F'[/^]' '{sub("^v", "", $$3)} END{print $$3}') && \
|
||||
pgtag=$$(skopeo list-tags docker://ghcr.io/ferretdb/postgres-documentdb | jq -r --arg tag "$$tag" '.Tags[] | select(endswith("ferretdb-" + $$tag))' | sort -V | tail -n1) && \
|
||||
sed -i "s|\(imageName: ghcr.io/ferretdb/postgres-documentdb:\).*|\1$$pgtag|" templates/postgres.yaml && \
|
||||
sed -i "s|\(image: ghcr.io/ferretdb/ferretdb:\).*|\1$$tag|" templates/ferretdb.yaml && \
|
||||
sed -i "s|\(appVersion: \).*|\1$$tag|" Chart.yaml
|
||||
@@ -1,82 +0,0 @@
|
||||
# Managed FerretDB Service
|
||||
|
||||
FerretDB is an open source MongoDB alternative.
|
||||
It translates MongoDB wire protocol queries to SQL and can be used as a direct replacement for MongoDB 5.0+.
|
||||
Internally, FerretDB service is backed by Postgres.
|
||||
|
||||
## Parameters
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------------------------------- | ---------- | ------- |
|
||||
| `replicas` | Number of replicas. | `int` | `2` |
|
||||
| `resources` | Explicit CPU and memory configuration for each FerretDB replica. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `resources.cpu` | CPU available to each replica. | `quantity` | `""` |
|
||||
| `resources.memory` | Memory (RAM) available to each replica. | `quantity` | `""` |
|
||||
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `micro` |
|
||||
| `size` | Persistent Volume Claim size available for application data. | `quantity` | `10Gi` |
|
||||
| `storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
| `external` | Enable external access from outside the cluster. | `bool` | `false` |
|
||||
|
||||
|
||||
### Application-specific parameters
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ------------------------ | ---------------------------------------------------------------------------------- | ------------------- | ----- |
|
||||
| `quorum` | Configuration for quorum-based synchronous replication. | `object` | `{}` |
|
||||
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas required for commit. | `int` | `0` |
|
||||
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas allowed (must be less than total replicas). | `int` | `0` |
|
||||
| `users` | Users configuration map. | `map[string]object` | `{}` |
|
||||
| `users[name].password` | Password for the user. | `string` | `""` |
|
||||
|
||||
|
||||
### Backup parameters
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ------------------------ | ------------------------------------------------------------ | -------- | ----------------------------------- |
|
||||
| `backup` | Backup configuration. | `object` | `{}` |
|
||||
| `backup.enabled` | Enable regular backups (default: false). | `bool` | `false` |
|
||||
| `backup.schedule` | Cron schedule for automated backups. | `string` | `0 2 * * * *` |
|
||||
| `backup.retentionPolicy` | Retention policy. | `string` | `30d` |
|
||||
| `backup.endpointURL` | S3 endpoint URL for uploads. | `string` | `http://minio-gateway-service:9000` |
|
||||
| `backup.destinationPath` | Path to store the backup (e.g. s3://bucket/path/to/folder/). | `string` | `s3://bucket/path/to/folder/` |
|
||||
| `backup.s3AccessKey` | Access key for S3 authentication. | `string` | `<your-access-key>` |
|
||||
| `backup.s3SecretKey` | Secret key for S3 authentication. | `string` | `<your-secret-key>` |
|
||||
|
||||
|
||||
### Bootstrap (recovery) parameters
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ------------------------ | ------------------------------------------------------------------- | -------- | ------- |
|
||||
| `bootstrap` | Bootstrap configuration. | `object` | `{}` |
|
||||
| `bootstrap.enabled` | Restore database cluster from a backup. | `bool` | `false` |
|
||||
| `bootstrap.recoveryTime` | Timestamp (RFC3339) for point-in-time recovery; empty means latest. | `string` | `""` |
|
||||
| `bootstrap.oldName` | Name of database cluster before deletion. | `string` | `""` |
|
||||
|
||||
|
||||
## Parameter examples and reference
|
||||
|
||||
### resources and resourcesPreset
|
||||
|
||||
`resources` sets explicit CPU and memory configurations for each replica.
|
||||
When left empty, the preset defined in `resourcesPreset` is applied.
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
cpu: 4000m
|
||||
memory: 4Gi
|
||||
```
|
||||
|
||||
`resourcesPreset` sets named CPU and memory configurations for each replica.
|
||||
This setting is ignored if the corresponding `resources` value is set.
|
||||
|
||||
| Preset name | CPU | memory |
|
||||
|-------------|--------|---------|
|
||||
| `nano` | `250m` | `128Mi` |
|
||||
| `micro` | `500m` | `256Mi` |
|
||||
| `small` | `1` | `512Mi` |
|
||||
| `medium` | `1` | `1Gi` |
|
||||
| `large` | `2` | `2Gi` |
|
||||
| `xlarge` | `4` | `4Gi` |
|
||||
| `2xlarge` | `8` | `8Gi` |
|
||||
@@ -1,12 +0,0 @@
|
||||
<svg width="144" height="144" viewBox="0 0 144 144" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<rect x="-0.00195312" width="144" height="144" rx="24" fill="url(#paint0_linear_683_2952)"/>
|
||||
<path d="M69.5923 22.131C58.2662 23.6787 46.9037 30.8714 40.3302 40.6679C39.274 42.2521 37.4531 45.548 37.4531 45.8757C37.4531 45.9122 38.3272 45.3841 39.3833 44.6921C52.3847 36.1156 67.8989 34.5314 80.5178 40.4858C83.2674 41.7787 84.9973 43.0351 87.4555 45.4933C91.589 49.645 94.6117 55.1988 96.7058 62.5007C97.7983 66.2518 98.7088 71.3686 98.9455 74.8465C99.0001 75.7934 99.1458 76.631 99.2369 76.6856C99.7467 76.9952 102.041 73.6629 103.662 70.276C106.229 64.8861 107.431 59.5872 107.413 53.7057C107.395 45.3841 104.518 38.3917 98.727 32.5648C93.592 27.3934 87.1095 23.8426 80.3175 22.4587C78.7333 22.1492 77.5679 22.0581 74.5999 22.0035C72.5422 21.9853 70.3025 22.0399 69.5923 22.131Z" fill="white"/>
|
||||
<path d="M45.52 46.4402C44.3364 47.0229 42.3516 48.8438 40.6035 50.9379C39.8205 51.8666 38.6369 53.0137 37.7629 53.6693C35.7234 55.1989 32.2455 58.604 30.4792 60.8073C21.2654 72.2244 18.6979 85.244 23.0863 98.3182C26.6917 109.025 35.0315 116.127 47.8508 119.35C52.8401 120.624 60.324 121.335 63.456 120.843L64.2572 120.715L63.019 119.987C56.1906 116.018 51.4198 109.317 50.0905 101.869C49.6899 99.611 49.6717 95.605 50.0723 93.4017C50.9645 88.4488 53.4592 83.8965 56.8461 81.0559C58.4303 79.7266 61.1981 78.3609 63.4014 77.8329C66.7155 77.0317 68.7367 76.1212 70.8307 74.4642C72.1782 73.408 73.3618 71.8056 74.3451 69.7298C75.1827 67.9635 76.9672 62.3551 76.9672 61.4628C76.9672 60.8437 76.3299 60.0061 75.4195 59.4416C74.946 59.1502 74.1994 58.9864 72.2875 58.7861C64.0569 57.9302 59.9599 56.4371 55.007 52.5221C54.2968 51.9576 53.441 51.3203 53.095 51.1018C52.749 50.9015 52.0571 50.1367 51.5836 49.4265C50.1451 47.3325 48.3606 45.985 46.9949 45.9668C46.7036 45.9668 46.0298 46.1853 45.52 46.4402ZM54.4607 54.8711C55.0798 55.1806 55.7535 55.5812 55.972 55.7451L56.3727 56.0729L55.7353 58.6222C55.1891 60.8437 55.098 61.4082 55.1526 62.9924C55.2073 64.5584 55.2619 64.9043 55.6261 65.4142C56.227 66.2336 57.2649 66.7253 58.4303 66.7253C60.0873 66.7253 61.3802 65.7784 63.5289 62.956C64.148 62.1548 64.6396 61.7177 65.368 61.3718C66.497 60.8073 67.2982 60.7527 69.811 60.9712L71.4863 61.135V62.1183C71.4863 63.6661 72.3057 64.5584 73.9809 64.8133L74.7821 64.9226L74.4908 65.5963C73.2161 68.6736 69.9385 72.1516 66.8611 73.6994C66.3695 73.9361 65.2587 74.3731 64.4029 74.6645C63.0008 75.1197 62.6184 75.1743 60.2148 75.1743C57.8294 75.1743 57.4288 75.1197 56.1177 74.6827C52.1663 73.3716 49.2347 70.4581 47.9054 66.5432C47.4319 65.1593 47.4137 61.135 47.8872 59.4598C48.5245 57.1472 49.6535 55.2353 50.8371 54.4887C51.6018 53.997 53.0222 54.1609 54.4607 54.8711Z" fill="white"/>
|
||||
<path d="M113.022 61.7361C113.022 62.5555 112.111 66.3431 111.347 68.7102C108.47 77.5781 103.262 85.5355 96.4697 91.3443C91.6989 95.4413 88.3119 97.244 82.9402 98.5733C79.4805 99.4291 77.2226 99.7023 72.8341 99.8115C67.3532 99.9572 61.9451 99.4655 57.1014 98.4094C56.1727 98.2091 55.3898 98.0816 55.3351 98.1363C55.1166 98.3366 55.9542 101.123 56.6826 102.598C58.0119 105.329 59.5232 107.368 62.2182 110.063C65.0588 112.904 67.1711 114.47 70.4487 116.163C78.57 120.351 87.8931 120.916 97.453 117.766C107.541 114.47 114.952 108.516 118.94 100.503C121.598 95.1864 122.691 89.5051 122.29 83.0227C121.799 75.0288 118.849 67.1989 114.57 62.5738C113.896 61.8454 113.277 61.2627 113.186 61.2627C113.095 61.2627 113.022 61.4812 113.022 61.7361Z" fill="white"/>
|
||||
<defs>
|
||||
<linearGradient id="paint0_linear_683_2952" x1="5.5" y1="11" x2="141" y2="124.5" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#45ADC6"/>
|
||||
<stop offset="1" stop-color="#216778"/>
|
||||
</linearGradient>
|
||||
</defs>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 3.7 KiB |
@@ -1,12 +0,0 @@
|
||||
{{- if .Values.backup.enabled }}
|
||||
---
|
||||
apiVersion: postgresql.cnpg.io/v1
|
||||
kind: ScheduledBackup
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-postgres
|
||||
spec:
|
||||
schedule: {{ .Values.backup.schedule | quote }}
|
||||
backupOwnerReference: self
|
||||
cluster:
|
||||
name: {{ .Release.Name }}-postgres
|
||||
{{- end }}
|
||||
@@ -1,19 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
|
||||
{{- if .Values.external }}
|
||||
externalTrafficPolicy: Local
|
||||
{{- if (include "cozy-lib.network.disableLoadBalancerNodePorts" $ | fromYaml) }}
|
||||
allocateLoadBalancerNodePorts: false
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: ferretdb
|
||||
port: 27017
|
||||
selector:
|
||||
app: {{ .Release.Name }}
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
containers:
|
||||
- name: ferretdb
|
||||
image: ghcr.io/ferretdb/ferretdb:2.4.0
|
||||
ports:
|
||||
- containerPort: 27017
|
||||
env:
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-postgres-superuser
|
||||
key: password
|
||||
- name: FERRETDB_POSTGRESQL_URL
|
||||
value: "postgresql://postgres:$(POSTGRESQL_PASSWORD)@{{ .Release.Name }}-postgres-rw:5432/postgres"
|
||||
@@ -1,114 +0,0 @@
|
||||
---
|
||||
apiVersion: postgresql.cnpg.io/v1
|
||||
kind: Cluster
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-postgres
|
||||
spec:
|
||||
instances: {{ .Values.replicas }}
|
||||
{{- if .Values.backup.enabled }}
|
||||
backup:
|
||||
barmanObjectStore:
|
||||
destinationPath: {{ .Values.backup.destinationPath }}
|
||||
endpointURL: {{ .Values.backup.endpointURL }}
|
||||
s3Credentials:
|
||||
accessKeyId:
|
||||
name: {{ .Release.Name }}-s3-creds
|
||||
key: AWS_ACCESS_KEY_ID
|
||||
secretAccessKey:
|
||||
name: {{ .Release.Name }}-s3-creds
|
||||
key: AWS_SECRET_ACCESS_KEY
|
||||
retentionPolicy: {{ .Values.backup.retentionPolicy }}
|
||||
{{- end }}
|
||||
|
||||
bootstrap:
|
||||
initdb:
|
||||
postInitSQL:
|
||||
- 'CREATE EXTENSION IF NOT EXISTS documentdb CASCADE;'
|
||||
{{- if .Values.bootstrap.enabled }}
|
||||
recovery:
|
||||
source: {{ .Values.bootstrap.oldName }}
|
||||
{{- if .Values.bootstrap.recoveryTime }}
|
||||
recoveryTarget:
|
||||
targetTime: {{ .Values.bootstrap.recoveryTime }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.bootstrap.enabled }}
|
||||
externalClusters:
|
||||
- name: {{ .Values.bootstrap.oldName }}
|
||||
barmanObjectStore:
|
||||
destinationPath: {{ .Values.backup.destinationPath }}
|
||||
endpointURL: {{ .Values.backup.endpointURL }}
|
||||
s3Credentials:
|
||||
accessKeyId:
|
||||
name: {{ .Release.Name }}-s3-creds
|
||||
key: AWS_ACCESS_KEY_ID
|
||||
secretAccessKey:
|
||||
name: {{ .Release.Name }}-s3-creds
|
||||
key: AWS_SECRET_ACCESS_KEY
|
||||
{{- end }}
|
||||
imageName: ghcr.io/ferretdb/postgres-documentdb:17-0.105.0-ferretdb-2.4.0
|
||||
postgresUID: 999
|
||||
postgresGID: 999
|
||||
enableSuperuserAccess: true
|
||||
{{- if .Values._cluster.scheduling }}
|
||||
{{- $rawConstraints := get .Values._cluster.scheduling "globalAppTopologySpreadConstraints" }}
|
||||
{{- if $rawConstraints }}
|
||||
{{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
cnpg.io/cluster: {{ .Release.Name }}-postgres
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
|
||||
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}
|
||||
monitoring:
|
||||
enablePodMonitor: true
|
||||
|
||||
postgresql:
|
||||
shared_preload_libraries:
|
||||
- pg_cron
|
||||
- pg_documentdb_core
|
||||
- pg_documentdb
|
||||
parameters:
|
||||
cron.database_name: 'postgres'
|
||||
pg_hba:
|
||||
- host postgres postgres 127.0.0.1/32 trust
|
||||
- host postgres postgres ::1/128 trust
|
||||
|
||||
storage:
|
||||
size: {{ required ".Values.size is required" .Values.size }}
|
||||
{{- with .Values.storageClass }}
|
||||
storageClass: {{ . }}
|
||||
{{- end }}
|
||||
|
||||
inheritedMetadata:
|
||||
labels:
|
||||
policy.cozystack.io/allow-to-apiserver: "true"
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
|
||||
{{- if .Values.users }}
|
||||
managed:
|
||||
roles:
|
||||
{{- range $user, $config := .Values.users }}
|
||||
- name: {{ $user }}
|
||||
ensure: present
|
||||
passwordSecret:
|
||||
name: {{ printf "%s-user-%s" $.Release.Name $user }}
|
||||
login: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- range $user, $config := .Values.users }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ printf "%s-user-%s" $.Release.Name $user }}
|
||||
labels:
|
||||
cnpg.io/reload: "true"
|
||||
type: kubernetes.io/basic-auth
|
||||
data:
|
||||
username: {{ $user | b64enc }}
|
||||
password: {{ $config.password | b64enc }}
|
||||
{{- end }}
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
minReplicas: 1
|
||||
kind: ferretdb
|
||||
type: ferretdb
|
||||
selector:
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
@@ -1,190 +0,0 @@
|
||||
{
|
||||
"title": "Chart Values",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"backup": {
|
||||
"description": "Backup configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"required": [
|
||||
"destinationPath",
|
||||
"enabled",
|
||||
"endpointURL",
|
||||
"retentionPolicy",
|
||||
"s3AccessKey",
|
||||
"s3SecretKey",
|
||||
"schedule"
|
||||
],
|
||||
"properties": {
|
||||
"destinationPath": {
|
||||
"description": "Path to store the backup (e.g. s3://bucket/path/to/folder/).",
|
||||
"type": "string",
|
||||
"default": "s3://bucket/path/to/folder/"
|
||||
},
|
||||
"enabled": {
|
||||
"description": "Enable regular backups (default: false).",
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"endpointURL": {
|
||||
"description": "S3 endpoint URL for uploads.",
|
||||
"type": "string",
|
||||
"default": "http://minio-gateway-service:9000"
|
||||
},
|
||||
"retentionPolicy": {
|
||||
"description": "Retention policy.",
|
||||
"type": "string",
|
||||
"default": "30d"
|
||||
},
|
||||
"s3AccessKey": {
|
||||
"description": "Access key for S3 authentication.",
|
||||
"type": "string",
|
||||
"default": "\u003cyour-access-key\u003e"
|
||||
},
|
||||
"s3SecretKey": {
|
||||
"description": "Secret key for S3 authentication.",
|
||||
"type": "string",
|
||||
"default": "\u003cyour-secret-key\u003e"
|
||||
},
|
||||
"schedule": {
|
||||
"description": "Cron schedule for automated backups.",
|
||||
"type": "string",
|
||||
"default": "0 2 * * * *"
|
||||
}
|
||||
}
|
||||
},
|
||||
"bootstrap": {
|
||||
"description": "Bootstrap configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"description": "Restore database cluster from a backup.",
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"oldName": {
|
||||
"description": "Name of database cluster before deletion.",
|
||||
"type": "string",
|
||||
"default": ""
|
||||
},
|
||||
"recoveryTime": {
|
||||
"description": "Timestamp (RFC3339) for point-in-time recovery; empty means latest.",
|
||||
"type": "string",
|
||||
"default": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"external": {
|
||||
"description": "Enable external access from outside the cluster.",
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"quorum": {
|
||||
"description": "Configuration for quorum-based synchronous replication.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"required": [
|
||||
"maxSyncReplicas",
|
||||
"minSyncReplicas"
|
||||
],
|
||||
"properties": {
|
||||
"maxSyncReplicas": {
|
||||
"description": "Maximum number of synchronous replicas allowed (must be less than total replicas).",
|
||||
"type": "integer",
|
||||
"default": 0
|
||||
},
|
||||
"minSyncReplicas": {
|
||||
"description": "Minimum number of synchronous replicas required for commit.",
|
||||
"type": "integer",
|
||||
"default": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Number of replicas.",
|
||||
"type": "integer",
|
||||
"default": 2
|
||||
},
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration for each FerretDB replica. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "CPU available to each replica.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Memory (RAM) available to each replica.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted.",
|
||||
"type": "string",
|
||||
"default": "micro",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"size": {
|
||||
"description": "Persistent Volume Claim size available for application data.",
|
||||
"default": "10Gi",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"storageClass": {
|
||||
"description": "StorageClass used to store the data.",
|
||||
"type": "string",
|
||||
"default": ""
|
||||
},
|
||||
"users": {
|
||||
"description": "Users configuration map.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"password": {
|
||||
"description": "Password for the user.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
##
|
||||
## @section Common parameters
|
||||
##
|
||||
|
||||
## @typedef {struct} Resources - Explicit CPU and memory configuration for each FerretDB replica.
|
||||
## @field {quantity} [cpu] - CPU available to each replica.
|
||||
## @field {quantity} [memory] - Memory (RAM) available to each replica.
|
||||
|
||||
## @enum {string} ResourcesPreset - Default sizing preset.
|
||||
## @value nano
|
||||
## @value micro
|
||||
## @value small
|
||||
## @value medium
|
||||
## @value large
|
||||
## @value xlarge
|
||||
## @value 2xlarge
|
||||
|
||||
## @param {int} replicas - Number of replicas.
|
||||
replicas: 2
|
||||
|
||||
## @param {Resources} [resources] - Explicit CPU and memory configuration for each FerretDB replica. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
resources: {}
|
||||
|
||||
## @param {ResourcesPreset} resourcesPreset="micro" - Default sizing preset used when `resources` is omitted.
|
||||
resourcesPreset: "micro"
|
||||
|
||||
## @param {quantity} size - Persistent Volume Claim size available for application data.
|
||||
size: 10Gi
|
||||
|
||||
## @param {string} storageClass - StorageClass used to store the data.
|
||||
storageClass: ""
|
||||
|
||||
## @param {bool} external - Enable external access from outside the cluster.
|
||||
external: false
|
||||
|
||||
##
|
||||
## @section Application-specific parameters
|
||||
##
|
||||
|
||||
## @typedef {struct} Quorum - Configuration for quorum-based synchronous replication.
|
||||
## @field {int} minSyncReplicas - Minimum number of synchronous replicas required for commit.
|
||||
## @field {int} maxSyncReplicas - Maximum number of synchronous replicas allowed (must be less than total replicas).
|
||||
|
||||
## @param {Quorum} quorum - Configuration for quorum-based synchronous replication.
|
||||
quorum:
|
||||
minSyncReplicas: 0
|
||||
maxSyncReplicas: 0
|
||||
|
||||
## @typedef {struct} User - User configuration.
|
||||
## @field {string} [password] - Password for the user.
|
||||
|
||||
## @param {map[string]User} users - Users configuration map.
|
||||
users: {}
|
||||
## Example:
|
||||
## users:
|
||||
## user1:
|
||||
## password: strongpassword
|
||||
## user2:
|
||||
## password: hackme
|
||||
##
|
||||
|
||||
##
|
||||
## @section Backup parameters
|
||||
##
|
||||
|
||||
## @typedef {struct} Backup - Backup configuration.
|
||||
## @field {bool} enabled - Enable regular backups (default: false).
|
||||
## @field {string} schedule - Cron schedule for automated backups.
|
||||
## @field {string} retentionPolicy - Retention policy.
|
||||
## @field {string} endpointURL - S3 endpoint URL for uploads.
|
||||
## @field {string} destinationPath - Path to store the backup (e.g. s3://bucket/path/to/folder/).
|
||||
## @field {string} s3AccessKey - Access key for S3 authentication.
|
||||
## @field {string} s3SecretKey - Secret key for S3 authentication.
|
||||
|
||||
## @param {Backup} backup - Backup configuration.
|
||||
backup:
|
||||
enabled: false
|
||||
schedule: "0 2 * * * *"
|
||||
retentionPolicy: 30d
|
||||
endpointURL: http://minio-gateway-service:9000
|
||||
destinationPath: s3://bucket/path/to/folder/
|
||||
s3AccessKey: "<your-access-key>"
|
||||
s3SecretKey: "<your-secret-key>"
|
||||
|
||||
##
|
||||
## @section Bootstrap (recovery) parameters
|
||||
##
|
||||
|
||||
## @typedef {struct} Bootstrap - Bootstrap configuration for restoring a database cluster from a backup.
|
||||
## @field {bool} [enabled] - Restore database cluster from a backup.
|
||||
## @field {string} [recoveryTime] - Timestamp (RFC3339) for point-in-time recovery; empty means latest.
|
||||
## @field {string} [oldName] - Name of database cluster before deletion.
|
||||
|
||||
## @param {Bootstrap} bootstrap - Bootstrap configuration.
|
||||
bootstrap:
|
||||
enabled: false
|
||||
recoveryTime: ""
|
||||
oldName: ""
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: v2
|
||||
name: ferretdb
|
||||
description: Managed FerretDB service
|
||||
icon: /logos/ferretdb.svg
|
||||
name: harbor
|
||||
description: Managed Harbor container registry
|
||||
icon: /logos/harbor.svg
|
||||
type: application
|
||||
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
|
||||
appVersion: 2.4.0
|
||||
appVersion: "2.14.2"
|
||||
7
packages/apps/harbor/Makefile
Normal file
7
packages/apps/harbor/Makefile
Normal file
@@ -0,0 +1,7 @@
|
||||
NAME=harbor
|
||||
|
||||
include ../../../hack/package.mk
|
||||
|
||||
generate:
|
||||
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
|
||||
../../../hack/update-crd.sh
|
||||
47
packages/apps/harbor/README.md
Normal file
47
packages/apps/harbor/README.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Managed Harbor Container Registry
|
||||
|
||||
Harbor is an open source trusted cloud native registry project that stores, signs, and scans content.
|
||||
|
||||
## Parameters
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| -------------- | -------------------------------------------------------------------------------------------- | -------- | ----- |
|
||||
| `host` | Hostname for external access to Harbor (defaults to 'harbor' subdomain for the tenant host). | `string` | `""` |
|
||||
| `storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
|
||||
|
||||
### Component configuration
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ----------------------------- | -------------------------------------------------------------------------------------------------------- | ---------- | ------- |
|
||||
| `core` | Core API server configuration. | `object` | `{}` |
|
||||
| `core.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `core.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `core.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `core.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `registry` | Container image registry configuration. | `object` | `{}` |
|
||||
| `registry.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `registry.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `registry.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `registry.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `jobservice` | Background job service configuration. | `object` | `{}` |
|
||||
| `jobservice.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `jobservice.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `jobservice.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `jobservice.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `nano` |
|
||||
| `trivy` | Trivy vulnerability scanner configuration. | `object` | `{}` |
|
||||
| `trivy.enabled` | Enable or disable the vulnerability scanner. | `bool` | `true` |
|
||||
| `trivy.size` | Persistent Volume size for vulnerability database cache. | `quantity` | `5Gi` |
|
||||
| `trivy.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `trivy.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `trivy.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `trivy.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `nano` |
|
||||
| `database` | PostgreSQL database configuration. | `object` | `{}` |
|
||||
| `database.size` | Persistent Volume size for database storage. | `quantity` | `5Gi` |
|
||||
| `database.replicas` | Number of database instances. | `int` | `2` |
|
||||
| `redis` | Redis cache configuration. | `object` | `{}` |
|
||||
| `redis.size` | Persistent Volume size for cache storage. | `quantity` | `1Gi` |
|
||||
| `redis.replicas` | Number of Redis replicas. | `int` | `2` |
|
||||
|
||||
1
packages/apps/harbor/logos/harbor.svg
Normal file
1
packages/apps/harbor/logos/harbor.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 6.0 KiB |
19
packages/apps/harbor/templates/bucket.yaml
Normal file
19
packages/apps/harbor/templates/bucket.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
{{- $seaweedfs := .Values._namespace.seaweedfs }}
|
||||
apiVersion: objectstorage.k8s.io/v1alpha1
|
||||
kind: BucketClaim
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-registry
|
||||
spec:
|
||||
bucketClassName: {{ $seaweedfs }}
|
||||
protocols:
|
||||
- s3
|
||||
---
|
||||
apiVersion: objectstorage.k8s.io/v1alpha1
|
||||
kind: BucketAccess
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-registry
|
||||
spec:
|
||||
bucketAccessClassName: {{ $seaweedfs }}
|
||||
bucketClaimName: {{ .Release.Name }}-registry
|
||||
credentialsSecretName: {{ .Release.Name }}-registry-bucket
|
||||
protocol: s3
|
||||
@@ -17,12 +17,21 @@ rules:
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}-credentials
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}-ingress
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
- {{ .Release.Name }}-core
|
||||
- {{ .Release.Name }}-registry
|
||||
- {{ .Release.Name }}-portal
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
@@ -30,7 +39,7 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
subjects:
|
||||
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
|
||||
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "super-admin" .Release.Namespace) }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
201
packages/apps/harbor/templates/harbor.yaml
Normal file
201
packages/apps/harbor/templates/harbor.yaml
Normal file
@@ -0,0 +1,201 @@
|
||||
{{- $host := .Values._namespace.host }}
|
||||
{{- $harborHost := .Values.host | default (printf "%s.%s" .Release.Name $host) }}
|
||||
|
||||
{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
|
||||
{{- $adminPassword := randAlphaNum 16 }}
|
||||
{{- $redisPassword := randAlphaNum 32 }}
|
||||
{{- if $existingSecret }}
|
||||
{{- $adminPassword = index $existingSecret.data "admin-password" | b64dec }}
|
||||
{{- if hasKey $existingSecret.data "redis-password" }}
|
||||
{{- $redisPassword = index $existingSecret.data "redis-password" | b64dec }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- $existingCoreSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-core" .Release.Name) }}
|
||||
{{- $tokenKey := "" }}
|
||||
{{- $tokenCert := "" }}
|
||||
{{- if $existingCoreSecret }}
|
||||
{{- if hasKey $existingCoreSecret.data "tls.key" }}
|
||||
{{- $tokenKey = index $existingCoreSecret.data "tls.key" | b64dec }}
|
||||
{{- end }}
|
||||
{{- if hasKey $existingCoreSecret.data "tls.crt" }}
|
||||
{{- $tokenCert = index $existingCoreSecret.data "tls.crt" | b64dec }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-credentials
|
||||
stringData:
|
||||
admin-password: {{ $adminPassword | quote }}
|
||||
redis-password: {{ $redisPassword | quote }}
|
||||
url: https://{{ $harborHost }}
|
||||
|
||||
---
|
||||
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-system
|
||||
labels:
|
||||
sharding.fluxcd.io/key: tenants
|
||||
spec:
|
||||
chartRef:
|
||||
kind: ExternalArtifact
|
||||
name: cozystack-harbor-application-default-harbor-system
|
||||
namespace: cozy-system
|
||||
interval: 5m
|
||||
timeout: 15m
|
||||
install:
|
||||
remediation:
|
||||
retries: -1
|
||||
upgrade:
|
||||
force: true
|
||||
remediation:
|
||||
retries: -1
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: cozystack-values
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-credentials
|
||||
valuesKey: redis-password
|
||||
targetPath: redis.password
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-credentials
|
||||
valuesKey: redis-password
|
||||
targetPath: harbor.redis.external.password
|
||||
values:
|
||||
bucket:
|
||||
secretName: {{ .Release.Name }}-registry-bucket
|
||||
db:
|
||||
replicas: {{ .Values.database.replicas }}
|
||||
size: {{ .Values.database.size }}
|
||||
{{- with .Values.storageClass }}
|
||||
storageClass: {{ . }}
|
||||
{{- end }}
|
||||
redis:
|
||||
replicas: {{ .Values.redis.replicas }}
|
||||
size: {{ .Values.redis.size }}
|
||||
{{- with .Values.storageClass }}
|
||||
storageClass: {{ . }}
|
||||
{{- end }}
|
||||
harbor:
|
||||
fullnameOverride: {{ .Release.Name }}
|
||||
harborAdminPassword: {{ $adminPassword | quote }}
|
||||
externalURL: https://{{ $harborHost }}
|
||||
expose:
|
||||
type: clusterIP
|
||||
clusterIP:
|
||||
name: {{ .Release.Name }}
|
||||
tls:
|
||||
enabled: false
|
||||
persistence:
|
||||
enabled: true
|
||||
resourcePolicy: "keep"
|
||||
imageChartStorage:
|
||||
type: s3
|
||||
s3:
|
||||
existingSecret: {{ .Release.Name }}-registry-s3
|
||||
region: us-east-1
|
||||
bucket: {{ .Release.Name }}-registry
|
||||
secure: false
|
||||
v4auth: true
|
||||
{{- if .Values.trivy.enabled }}
|
||||
persistentVolumeClaim:
|
||||
trivy:
|
||||
size: {{ .Values.trivy.size }}
|
||||
{{- with .Values.storageClass }}
|
||||
storageClass: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
portal:
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list "nano" (dict) $) | nindent 10 }}
|
||||
core:
|
||||
{{- if and $tokenKey $tokenCert }}
|
||||
tokenKey: {{ $tokenKey | quote }}
|
||||
tokenCert: {{ $tokenCert | quote }}
|
||||
{{- end }}
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.core.resourcesPreset .Values.core.resources $) | nindent 10 }}
|
||||
registry:
|
||||
registry:
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.registry.resourcesPreset .Values.registry.resources $) | nindent 12 }}
|
||||
controller:
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.registry.resourcesPreset .Values.registry.resources $) | nindent 12 }}
|
||||
jobservice:
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.jobservice.resourcesPreset .Values.jobservice.resources $) | nindent 10 }}
|
||||
trivy:
|
||||
enabled: {{ .Values.trivy.enabled }}
|
||||
{{- if .Values.trivy.enabled }}
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.trivy.resourcesPreset .Values.trivy.resources $) | nindent 10 }}
|
||||
{{- end }}
|
||||
database:
|
||||
type: external
|
||||
external:
|
||||
host: "{{ .Release.Name }}-db-rw"
|
||||
port: "5432"
|
||||
username: app
|
||||
coreDatabase: app
|
||||
sslmode: require
|
||||
existingSecret: "{{ .Release.Name }}-db-app"
|
||||
redis:
|
||||
type: external
|
||||
external:
|
||||
addr: "rfs-{{ .Release.Name }}-redis:26379"
|
||||
sentinelMasterSet: "mymaster"
|
||||
coreDatabaseIndex: "0"
|
||||
jobserviceDatabaseIndex: "1"
|
||||
registryDatabaseIndex: "2"
|
||||
trivyAdapterIndex: "5"
|
||||
metrics:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
|
||||
---
|
||||
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-core
|
||||
spec:
|
||||
replicas: 1
|
||||
minReplicas: 1
|
||||
kind: harbor
|
||||
type: core
|
||||
selector:
|
||||
release: {{ $.Release.Name }}-system
|
||||
component: core
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
---
|
||||
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-registry
|
||||
spec:
|
||||
replicas: 1
|
||||
minReplicas: 1
|
||||
kind: harbor
|
||||
type: registry
|
||||
selector:
|
||||
release: {{ $.Release.Name }}-system
|
||||
component: registry
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
---
|
||||
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-portal
|
||||
spec:
|
||||
replicas: 1
|
||||
minReplicas: 1
|
||||
kind: harbor
|
||||
type: portal
|
||||
selector:
|
||||
release: {{ $.Release.Name }}-system
|
||||
component: portal
|
||||
version: {{ $.Chart.Version }}
|
||||
36
packages/apps/harbor/templates/ingress.yaml
Normal file
36
packages/apps/harbor/templates/ingress.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
{{- $ingress := .Values._namespace.ingress }}
|
||||
{{- $host := .Values._namespace.host }}
|
||||
{{- $harborHost := .Values.host | default (printf "%s.%s" .Release.Name $host) }}
|
||||
{{- $issuerType := (index .Values._cluster "clusterissuer") | default "http01" }}
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-ingress
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "0"
|
||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "900"
|
||||
nginx.ingress.kubernetes.io/proxy-send-timeout: "900"
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
|
||||
{{- if ne $issuerType "cloudflare" }}
|
||||
acme.cert-manager.io/http01-ingress-class: {{ $ingress }}
|
||||
{{- end }}
|
||||
cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||
spec:
|
||||
ingressClassName: {{ $ingress }}
|
||||
tls:
|
||||
- hosts:
|
||||
- {{ $harborHost | quote }}
|
||||
secretName: {{ .Release.Name }}-ingress-tls
|
||||
rules:
|
||||
- host: {{ $harborHost | quote }}
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: {{ .Release.Name }}
|
||||
port:
|
||||
number: 80
|
||||
315
packages/apps/harbor/values.schema.json
Normal file
315
packages/apps/harbor/values.schema.json
Normal file
@@ -0,0 +1,315 @@
|
||||
{
|
||||
"title": "Chart Values",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"core": {
|
||||
"description": "Core API server configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "Number of CPU cores allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Amount of memory allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted.",
|
||||
"type": "string",
|
||||
"default": "small",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"database": {
|
||||
"description": "PostgreSQL database configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"required": [
|
||||
"replicas",
|
||||
"size"
|
||||
],
|
||||
"properties": {
|
||||
"replicas": {
|
||||
"description": "Number of database instances.",
|
||||
"type": "integer",
|
||||
"default": 2
|
||||
},
|
||||
"size": {
|
||||
"description": "Persistent Volume size for database storage.",
|
||||
"default": "5Gi",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"host": {
|
||||
"description": "Hostname for external access to Harbor (defaults to 'harbor' subdomain for the tenant host).",
|
||||
"type": "string",
|
||||
"default": ""
|
||||
},
|
||||
"jobservice": {
|
||||
"description": "Background job service configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "Number of CPU cores allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Amount of memory allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted.",
|
||||
"type": "string",
|
||||
"default": "nano",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"redis": {
|
||||
"description": "Redis cache configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"required": [
|
||||
"replicas",
|
||||
"size"
|
||||
],
|
||||
"properties": {
|
||||
"replicas": {
|
||||
"description": "Number of Redis replicas.",
|
||||
"type": "integer",
|
||||
"default": 2
|
||||
},
|
||||
"size": {
|
||||
"description": "Persistent Volume size for cache storage.",
|
||||
"default": "1Gi",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"registry": {
|
||||
"description": "Container image registry configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "Number of CPU cores allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Amount of memory allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted.",
|
||||
"type": "string",
|
||||
"default": "small",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"storageClass": {
|
||||
"description": "StorageClass used to store the data.",
|
||||
"type": "string",
|
||||
"default": ""
|
||||
},
|
||||
"trivy": {
|
||||
"description": "Trivy vulnerability scanner configuration.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"required": [
|
||||
"enabled",
|
||||
"size"
|
||||
],
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"description": "Enable or disable the vulnerability scanner.",
|
||||
"type": "boolean",
|
||||
"default": true
|
||||
},
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "Number of CPU cores allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Amount of memory allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted.",
|
||||
"type": "string",
|
||||
"default": "nano",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"size": {
|
||||
"description": "Persistent Volume size for vulnerability database cache.",
|
||||
"default": "5Gi",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
84
packages/apps/harbor/values.yaml
Normal file
84
packages/apps/harbor/values.yaml
Normal file
@@ -0,0 +1,84 @@
|
||||
##
|
||||
## @section Common parameters
|
||||
##
|
||||
|
||||
## @param {string} [host] - Hostname for external access to Harbor (defaults to 'harbor' subdomain for the tenant host).
|
||||
host: ""
|
||||
|
||||
## @param {string} storageClass - StorageClass used to store the data.
|
||||
storageClass: ""
|
||||
|
||||
##
|
||||
## @section Component configuration
|
||||
##
|
||||
|
||||
## @typedef {struct} Resources - Resource configuration.
|
||||
## @field {quantity} [cpu] - Number of CPU cores allocated.
|
||||
## @field {quantity} [memory] - Amount of memory allocated.
|
||||
|
||||
## @enum {string} ResourcesPreset - Default sizing preset.
|
||||
## @value nano
|
||||
## @value micro
|
||||
## @value small
|
||||
## @value medium
|
||||
## @value large
|
||||
## @value xlarge
|
||||
## @value 2xlarge
|
||||
|
||||
## @typedef {struct} Core - Core API server configuration.
|
||||
## @field {Resources} [resources] - Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
## @field {ResourcesPreset} [resourcesPreset] - Default sizing preset used when `resources` is omitted.
|
||||
|
||||
## @param {Core} core - Core API server configuration.
|
||||
core:
|
||||
resources: {}
|
||||
resourcesPreset: "small"
|
||||
|
||||
## @typedef {struct} Registry - Container image registry configuration.
|
||||
## @field {Resources} [resources] - Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
## @field {ResourcesPreset} [resourcesPreset] - Default sizing preset used when `resources` is omitted.
|
||||
|
||||
## @param {Registry} registry - Container image registry configuration.
|
||||
registry:
|
||||
resources: {}
|
||||
resourcesPreset: "small"
|
||||
|
||||
## @typedef {struct} Jobservice - Background job service configuration.
|
||||
## @field {Resources} [resources] - Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
## @field {ResourcesPreset} [resourcesPreset] - Default sizing preset used when `resources` is omitted.
|
||||
|
||||
## @param {Jobservice} jobservice - Background job service configuration.
|
||||
jobservice:
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
|
||||
## @typedef {struct} Trivy - Trivy vulnerability scanner configuration.
|
||||
## @field {bool} enabled - Enable or disable the vulnerability scanner.
|
||||
## @field {quantity} size - Persistent Volume size for vulnerability database cache.
|
||||
## @field {Resources} [resources] - Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
## @field {ResourcesPreset} [resourcesPreset] - Default sizing preset used when `resources` is omitted.
|
||||
|
||||
## @param {Trivy} trivy - Trivy vulnerability scanner configuration.
|
||||
trivy:
|
||||
enabled: true
|
||||
size: 5Gi
|
||||
resources: {}
|
||||
resourcesPreset: "nano"
|
||||
|
||||
## @typedef {struct} Database - PostgreSQL database configuration (provisioned via CloudNativePG).
|
||||
## @field {quantity} size - Persistent Volume size for database storage.
|
||||
## @field {int} replicas - Number of database instances.
|
||||
|
||||
## @param {Database} database - PostgreSQL database configuration.
|
||||
database:
|
||||
size: 5Gi
|
||||
replicas: 2
|
||||
|
||||
## @typedef {struct} Redis - Redis cache configuration (provisioned via redis-operator).
|
||||
## @field {quantity} size - Persistent Volume size for cache storage.
|
||||
## @field {int} replicas - Number of Redis replicas.
|
||||
|
||||
## @param {Redis} redis - Redis cache configuration.
|
||||
redis:
|
||||
size: 1Gi
|
||||
replicas: 2
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/nginx-cache:0.0.0@sha256:9e34fd50393b418d9516aadb488067a3a63675b045811beb1c0afc9c61e149e8
|
||||
ghcr.io/cozystack/cozystack/nginx-cache:0.0.0@sha256:cb25e40cb665b8bbeee8cb1ec39da4c9a7452ef3f2f371912bbc0d1b1e2d40a8
|
||||
|
||||
@@ -2,3 +2,4 @@
|
||||
/logos
|
||||
/Makefile
|
||||
/hack
|
||||
/images/*/*
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.0.0@sha256:6f2b1d6b0b2bdc66f1cbb30c59393369cbf070cb8f5fec748f176952273483cc
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.0.0@sha256:7deeee117e7eec599cb453836ca95eadd131dfc8c875dc457ef29dc1433395e0
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:726d9287e8caaea94eaf24c4f44734e3fbf4f8aa032b66b81848ebf95297cffe
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:604561e23df1b8eb25c24cf73fd93c7aaa6d1e7c56affbbda5c6f0f83424e4b1
|
||||
|
||||
@@ -1,31 +1,23 @@
|
||||
# Source: https://github.com/kubevirt/csi-driver/blob/main/Dockerfile
|
||||
ARG builder_image=docker.io/library/golang:1.22.5
|
||||
FROM ${builder_image} AS builder
|
||||
RUN git clone https://github.com/kubevirt/csi-driver /src/kubevirt-csi-driver \
|
||||
&& cd /src/kubevirt-csi-driver \
|
||||
&& git checkout a8d6605bc9997bcfda3fb9f1f82ba6445b4984cc
|
||||
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
ENV GOOS=$TARGETOS
|
||||
ENV GOARCH=$TARGETARCH
|
||||
|
||||
WORKDIR /src/kubevirt-csi-driver
|
||||
WORKDIR /src
|
||||
|
||||
RUN make build
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
COPY *.go ./
|
||||
RUN CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build \
|
||||
-ldflags "-X kubevirt.io/csi-driver/pkg/service.VendorVersion=0.2.0" \
|
||||
-o kubevirt-csi-driver .
|
||||
|
||||
FROM quay.io/centos/centos:stream9
|
||||
ARG git_url=https://github.com/kubevirt/csi-driver.git
|
||||
|
||||
LABEL maintainers="The KubeVirt Project <kubevirt-dev@googlegroups.com>" \
|
||||
description="KubeVirt CSI Driver" \
|
||||
multi.GIT_URL=${git_url}
|
||||
RUN dnf install -y e2fsprogs xfsprogs nfs-utils && dnf clean all
|
||||
|
||||
COPY --from=builder /src/kubevirt-csi-driver .
|
||||
|
||||
ENTRYPOINT ["./kubevirt-csi-driver"]
|
||||
|
||||
RUN dnf install -y e2fsprogs xfsprogs && dnf clean all
|
||||
|
||||
ARG git_sha=NONE
|
||||
LABEL multi.GIT_SHA=${git_sha}
|
||||
|
||||
COPY --from=builder /src/kubevirt-csi-driver/kubevirt-csi-driver .
|
||||
|
||||
@@ -0,0 +1,536 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
csi "github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/klog/v2"
|
||||
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
|
||||
|
||||
kubevirtclient "kubevirt.io/csi-driver/pkg/kubevirt"
|
||||
"kubevirt.io/csi-driver/pkg/service"
|
||||
"kubevirt.io/csi-driver/pkg/util"
|
||||
)
|
||||
|
||||
// Keys and parameters used by the wrapped controller service.
const (
	// nfsVolumeKey marks a volume as NFS-backed (RWX Filesystem) in its VolumeContext.
	nfsVolumeKey = "nfsVolume"
	// nfsExportKey carries the nfs:// export URL in the PublishContext.
	nfsExportKey = "nfsExport"
	// busParameter and serialParameter mirror the upstream driver's
	// disk-attachment VolumeContext keys.
	busParameter    = "bus"
	serialParameter = "serial"
)
|
||||
|
||||
// ciliumNetworkPolicyGVR identifies the CiliumNetworkPolicy custom resource
// for the dynamic client (there is no typed client for Cilium here).
var ciliumNetworkPolicyGVR = schema.GroupVersionResource{
	Group:    "cilium.io",
	Version:  "v2",
	Resource: "ciliumnetworkpolicies",
}
|
||||
|
||||
// Compile-time assertion that WrappedControllerService implements csi.ControllerServer.
var _ csi.ControllerServer = &WrappedControllerService{}

// WrappedControllerService embeds the upstream ControllerService and adds RWX Filesystem (NFS) support.
type WrappedControllerService struct {
	*service.ControllerService
	// infraClient accesses core resources (PVCs, PVs) in the infra cluster.
	infraClient kubernetes.Interface
	// dynamicClient manages CiliumNetworkPolicy objects.
	dynamicClient dynamic.Interface
	// virtClient talks to KubeVirt/CDI (DataVolumes, VMIs, snapshots).
	virtClient kubevirtclient.Client
	// infraNamespace is the infra-cluster namespace holding volumes and VMIs.
	infraNamespace string
	// infraClusterLabels are stamped onto DataVolumes created in the infra cluster.
	infraClusterLabels map[string]string
	// storageClassEnforcement restricts which infra storage classes may be used.
	storageClassEnforcement util.StorageClassEnforcement
}
|
||||
|
||||
// isRWXFilesystem checks if the volume capabilities request RWX access with filesystem mode.
|
||||
func isRWXFilesystem(caps []*csi.VolumeCapability) bool {
|
||||
hasRWX := false
|
||||
hasMount := false
|
||||
for _, cap := range caps {
|
||||
if cap == nil {
|
||||
continue
|
||||
}
|
||||
if cap.GetMount() != nil {
|
||||
hasMount = true
|
||||
}
|
||||
if am := cap.GetAccessMode(); am != nil && am.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
|
||||
hasRWX = true
|
||||
}
|
||||
}
|
||||
return hasRWX && hasMount
|
||||
}
|
||||
|
||||
// CreateVolume intercepts RWX Filesystem requests and creates a DataVolume in the infra
// cluster with AccessMode=RWX and VolumeMode=Filesystem. Upstream rejects RWX+Filesystem,
// so we handle DataVolume creation ourselves. Using DataVolume (not bare PVC) preserves
// compatibility with upstream snapshot and clone operations.
// For all other requests, delegates to upstream.
func (w *WrappedControllerService) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "missing request")
	}
	if !isRWXFilesystem(req.GetVolumeCapabilities()) {
		return w.ControllerService.CreateVolume(ctx, req)
	}

	if len(req.GetName()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "name missing in request")
	}

	// Storage class enforcement: unless AllowAll, the requested infra storage
	// class must be in the allow list (or empty, when defaults are allowed).
	storageClassName := req.Parameters[kubevirtclient.InfraStorageClassNameParameter]
	if !w.storageClassEnforcement.AllowAll {
		if storageClassName == "" {
			if !w.storageClassEnforcement.AllowDefault {
				return nil, status.Error(codes.InvalidArgument, "infraStorageclass is not in the allowed list")
			}
		} else if !util.Contains(w.storageClassEnforcement.AllowList, storageClassName) {
			return nil, status.Error(codes.InvalidArgument, "infraStorageclass is not in the allowed list")
		}
	}

	// NOTE(review): GetCapacityRange() may be nil, in which case RequiredBytes
	// is 0 and a zero-size storage request is submitted — confirm CDI/LINSTOR
	// applies a sane default in that case.
	storageSize := req.GetCapacityRange().GetRequiredBytes()
	dvName := req.Name

	// Determine DataVolume source (blank, snapshot, or clone)
	source, err := w.determineDvSource(ctx, req)
	if err != nil {
		return nil, err
	}

	// Handle CSI clone: CDI doesn't allow cloning PVCs in use by a pod,
	// so use DataSourceRef instead (same approach as upstream)
	sourcePVCName := ""
	if source.PVC != nil {
		sourcePVCName = source.PVC.Name
		source = nil
	}

	volumeMode := corev1.PersistentVolumeFilesystem
	dv := &cdiv1.DataVolume{
		TypeMeta: metav1.TypeMeta{
			Kind:       "DataVolume",
			APIVersion: cdiv1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      dvName,
			Namespace: w.infraNamespace,
			Labels:    w.infraClusterLabels,
			// Keep the DataVolume after import completes, and ask CDI to bind
			// the PVC immediately (needed for WaitForFirstConsumer classes).
			Annotations: map[string]string{
				"cdi.kubevirt.io/storage.deleteAfterCompletion":    "false",
				"cdi.kubevirt.io/storage.bind.immediate.requested": "true",
			},
		},
		Spec: cdiv1.DataVolumeSpec{
			Storage: &cdiv1.StorageSpec{
				AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
				VolumeMode:  &volumeMode,
				Resources: corev1.ResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceStorage: *resource.NewScaledQuantity(storageSize, 0),
					},
				},
			},
			Source: source,
		},
	}

	// CSI clone path: reference the source PVC via DataSourceRef (see above).
	if sourcePVCName != "" {
		dv.Spec.Storage.DataSourceRef = &corev1.TypedObjectReference{
			Kind: "PersistentVolumeClaim",
			Name: sourcePVCName,
		}
	}

	if storageClassName != "" {
		dv.Spec.Storage.StorageClassName = &storageClassName
	}

	// Idempotency: check if DataVolume already exists. CSI CreateVolume must be
	// retry-safe, so an existing DV with a matching size is treated as success;
	// a size mismatch is reported as AlreadyExists per the CSI spec.
	if existingDv, err := w.virtClient.GetDataVolume(ctx, w.infraNamespace, dvName); errors.IsNotFound(err) {
		klog.Infof("Creating NFS DataVolume %s/%s", w.infraNamespace, dvName)
		dv, err = w.virtClient.CreateDataVolume(ctx, w.infraNamespace, dv)
		if err != nil {
			klog.Errorf("Failed creating NFS DataVolume %s: %v", dvName, err)
			return nil, err
		}
	} else if err != nil {
		return nil, err
	} else {
		if existingDv != nil && existingDv.Spec.Storage != nil {
			existingRequest := existingDv.Spec.Storage.Resources.Requests[corev1.ResourceStorage]
			newRequest := dv.Spec.Storage.Resources.Requests[corev1.ResourceStorage]
			if newRequest.Cmp(existingRequest) != 0 {
				return nil, status.Error(codes.AlreadyExists, "requested storage size does not match existing size")
			}
			dv = existingDv
		}
	}

	// The DV UID doubles as the disk serial in the volume context (matches the
	// serialParameter contract used by the node side).
	serial := string(dv.GetUID())

	return &csi.CreateVolumeResponse{
		Volume: &csi.Volume{
			CapacityBytes: storageSize,
			VolumeId:      dvName,
			VolumeContext: map[string]string{
				busParameter:    "scsi",
				serialParameter: serial,
				// Flag consumed by ControllerPublishVolume to pick the NFS path.
				nfsVolumeKey: "true",
			},
			ContentSource: req.GetVolumeContentSource(),
		},
	}, nil
}
|
||||
|
||||
// determineDvSource determines the DataVolume source from the CSI request content source.
// Mirrors upstream logic for blank, snapshot, and clone sources.
//
// Returns a source with exactly one of Snapshot, PVC, or Blank populated:
//   - snapshot content source -> cdiv1.DataVolumeSourceSnapshot (must exist in infra namespace)
//   - volume content source   -> cdiv1.DataVolumeSourcePVC (must exist in infra namespace)
//   - no content source       -> blank image
func (w *WrappedControllerService) determineDvSource(ctx context.Context, req *csi.CreateVolumeRequest) (*cdiv1.DataVolumeSource, error) {
	res := &cdiv1.DataVolumeSource{}
	if req.GetVolumeContentSource() != nil {
		source := req.GetVolumeContentSource()
		switch source.Type.(type) {
		case *csi.VolumeContentSource_Snapshot:
			// Resolve the source snapshot; NotFound maps to codes.NotFound per CSI spec.
			snapshot, err := w.virtClient.GetVolumeSnapshot(ctx, w.infraNamespace, source.GetSnapshot().GetSnapshotId())
			if errors.IsNotFound(err) {
				return nil, status.Errorf(codes.NotFound, "source snapshot %s not found", source.GetSnapshot().GetSnapshotId())
			} else if err != nil {
				return nil, err
			}
			if snapshot != nil {
				res.Snapshot = &cdiv1.DataVolumeSourceSnapshot{
					Name:      snapshot.Name,
					Namespace: w.infraNamespace,
				}
			}
		case *csi.VolumeContentSource_Volume:
			// Resolve the source DataVolume for a clone request.
			volume, err := w.virtClient.GetDataVolume(ctx, w.infraNamespace, source.GetVolume().GetVolumeId())
			if errors.IsNotFound(err) {
				return nil, status.Errorf(codes.NotFound, "source volume %s not found", source.GetVolume().GetVolumeId())
			} else if err != nil {
				return nil, err
			}
			if volume != nil {
				res.PVC = &cdiv1.DataVolumeSourcePVC{
					Name:      volume.Name,
					Namespace: w.infraNamespace,
				}
			}
		default:
			return nil, status.Error(codes.InvalidArgument, "unknown content type")
		}
	} else {
		// No content source: provision an empty (blank) volume.
		res.Blank = &cdiv1.DataVolumeBlankImage{}
	}
	return res, nil
}
|
||||
|
||||
// ControllerPublishVolume for NFS volumes: annotates infra PVC for WFFC binding,
// waits for PVC bound, extracts NFS export from PV, and creates CiliumNetworkPolicy.
// For RWO volumes, delegates to upstream (hotplug SCSI).
//
// The node ID is a "namespace/name" key identifying the tenant VM; the returned
// PublishContext carries the nfs:// export URL for the node plugin to mount.
func (w *WrappedControllerService) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
	// Non-NFS volumes (no nfsVolume flag set by CreateVolume) go through upstream.
	if req.GetVolumeContext()[nfsVolumeKey] != "true" {
		return w.ControllerService.ControllerPublishVolume(ctx, req)
	}

	if len(req.GetVolumeId()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "volume id missing in request")
	}
	if len(req.GetNodeId()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "node id missing in request")
	}

	dvName := req.GetVolumeId()
	// Node ID is a "namespace/name" key for the target VM.
	vmNamespace, vmName, err := cache.SplitMetaNamespaceKey(req.GetNodeId())
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to parse node ID %q: %v", req.GetNodeId(), err)
	}

	klog.V(3).Infof("Publishing NFS volume %s to node %s/%s", dvName, vmNamespace, vmName)

	// Get VMI for CiliumNetworkPolicy ownerReference (ties the policy's
	// lifetime to the VM instance so it is garbage-collected with it).
	vmi, err := w.virtClient.GetVirtualMachine(ctx, vmNamespace, vmName)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to get VMI %s/%s: %v", vmNamespace, vmName, err)
	}

	// Wait for PVC to be bound (CDI handles immediate binding via annotation).
	// Polls every second for up to 2 minutes; also aborts on ctx cancellation.
	klog.V(3).Infof("Waiting for PVC %s to be bound", dvName)
	if err := wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
		p, err := w.infraClient.CoreV1().PersistentVolumeClaims(w.infraNamespace).Get(ctx, dvName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return p.Status.Phase == corev1.ClaimBound, nil
	}); err != nil {
		return nil, status.Errorf(codes.Internal, "timed out waiting for PVC %s to be bound: %v", dvName, err)
	}

	// Read PV to get NFS export (PVC must be re-read so VolumeName is set).
	pvc, err := w.infraClient.CoreV1().PersistentVolumeClaims(w.infraNamespace).Get(ctx, dvName, metav1.GetOptions{})
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to re-read PVC %s: %v", dvName, err)
	}
	pv, err := w.infraClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to get PV %s: %v", pvc.Spec.VolumeName, err)
	}
	nfsExport, err := getNFSExport(pv)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to extract NFS export from PV %s: %v", pv.Name, err)
	}
	klog.V(3).Infof("NFS export for volume %s: %s", dvName, nfsExport)

	// Parse NFS URL for CiliumNetworkPolicy port
	_, port, _, err := parseNFSExport(nfsExport)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to parse NFS export URL: %v", err)
	}

	// Create or update CiliumNetworkPolicy allowing egress to NFS server.
	// One policy per volume; each publishing VMI is added as an owner.
	cnpName := fmt.Sprintf("csi-nfs-%s", dvName)
	vmiOwnerRef := map[string]interface{}{
		"apiVersion": "kubevirt.io/v1",
		"kind":       "VirtualMachineInstance",
		"name":       vmName,
		"uid":        string(vmi.UID),
	}
	cnp := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "cilium.io/v2",
			"kind":       "CiliumNetworkPolicy",
			"metadata": map[string]interface{}{
				"name":            cnpName,
				"namespace":       vmNamespace,
				"ownerReferences": []interface{}{vmiOwnerRef},
			},
			"spec": map[string]interface{}{
				// Select the tenant VM's pods by the kubevirt.io/vm label.
				"endpointSelector": map[string]interface{}{
					"matchLabels": map[string]interface{}{
						"kubevirt.io/vm": vmName,
					},
				},
				// Allow egress only to the LINSTOR NFS server pods on the export port.
				"egress": []interface{}{
					map[string]interface{}{
						"toEndpoints": []interface{}{
							map[string]interface{}{
								"matchLabels": map[string]interface{}{
									"k8s:app.kubernetes.io/component": "linstor-csi-nfs-server",
									"k8s:io.kubernetes.pod.namespace": "cozy-linstor",
								},
							},
						},
						"toPorts": []interface{}{
							map[string]interface{}{
								"ports": []interface{}{
									map[string]interface{}{
										"port":     port,
										"protocol": "TCP",
									},
								},
							},
						},
					},
				},
			},
		},
	}

	if _, err := w.dynamicClient.Resource(ciliumNetworkPolicyGVR).Namespace(vmNamespace).Create(ctx, cnp, metav1.CreateOptions{}); err != nil {
		if !errors.IsAlreadyExists(err) {
			return nil, status.Errorf(codes.Internal, "failed to create CiliumNetworkPolicy %s: %v", cnpName, err)
		}
		// CNP exists — add ownerReference for this VMI
		if err := w.addCNPOwnerReference(ctx, vmNamespace, cnpName, vmiOwnerRef); err != nil {
			return nil, err
		}
	}

	klog.V(3).Infof("Successfully published NFS volume %s", dvName)
	return &csi.ControllerPublishVolumeResponse{
		PublishContext: map[string]string{
			nfsExportKey: nfsExport,
		},
	}, nil
}
|
||||
|
||||
// ControllerUnpublishVolume for NFS volumes: deletes CiliumNetworkPolicy.
// For RWO volumes, delegates to upstream (hotplug removal).
//
// A missing infra PVC is treated as already-unpublished (idempotent success).
func (w *WrappedControllerService) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
	dvName := req.GetVolumeId()

	// Determine if NFS by checking infra PVC access modes.
	// NOTE(review): the nfsVolume flag from VolumeContext is unavailable here,
	// so RWX access mode on the infra PVC is used as the NFS indicator.
	pvc, err := w.infraClient.CoreV1().PersistentVolumeClaims(w.infraNamespace).Get(ctx, dvName, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			// Volume is gone — nothing left to unpublish.
			return &csi.ControllerUnpublishVolumeResponse{}, nil
		}
		return nil, err
	}

	if !hasRWXAccessMode(pvc) {
		return w.ControllerService.ControllerUnpublishVolume(ctx, req)
	}

	// NFS volume: remove VMI ownerReference from CiliumNetworkPolicy
	vmNamespace, vmName, err := cache.SplitMetaNamespaceKey(req.GetNodeId())
	if err != nil {
		return nil, status.Errorf(codes.Internal, "failed to parse node ID %q: %v", req.GetNodeId(), err)
	}

	cnpName := fmt.Sprintf("csi-nfs-%s", dvName)
	klog.V(3).Infof("Removing VMI %s ownerReference from CiliumNetworkPolicy %s/%s", vmName, vmNamespace, cnpName)
	if err := w.removeCNPOwnerReference(ctx, vmNamespace, cnpName, vmName); err != nil {
		return nil, err
	}

	klog.V(3).Infof("Successfully unpublished NFS volume %s", dvName)
	return &csi.ControllerUnpublishVolumeResponse{}, nil
}
|
||||
|
||||
// ControllerExpandVolume delegates to upstream for the actual DataVolume/PVC resize.
|
||||
// For NFS volumes, LINSTOR handles NFS server resize automatically, so no node expansion is needed.
|
||||
func (w *WrappedControllerService) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
|
||||
resp, err := w.ControllerService.ControllerExpandVolume(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For NFS volumes, no node-side expansion is needed
|
||||
pvc, err := w.infraClient.CoreV1().PersistentVolumeClaims(w.infraNamespace).Get(ctx, req.GetVolumeId(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.Warningf("Failed to check PVC access mode for %s/%s: %v", w.infraNamespace, req.GetVolumeId(), err)
|
||||
} else if hasRWXAccessMode(pvc) {
|
||||
resp.NodeExpansionRequired = false
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// addCNPOwnerReference adds a VMI ownerReference to an existing CiliumNetworkPolicy.
//
// Idempotent: if an ownerReference with the same UID is already present, the
// update is skipped. Runs under RetryOnConflict to survive concurrent updates.
func (w *WrappedControllerService) addCNPOwnerReference(ctx context.Context, namespace, cnpName string, ownerRef map[string]interface{}) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		existing, err := w.dynamicClient.Resource(ciliumNetworkPolicyGVR).Namespace(namespace).Get(ctx, cnpName, metav1.GetOptions{})
		if err != nil {
			return status.Errorf(codes.Internal, "failed to get CiliumNetworkPolicy %s: %v", cnpName, err)
		}

		// Deduplicate by UID: skip the update if this VMI already owns the CNP.
		ownerRefs, _, _ := unstructured.NestedSlice(existing.Object, "metadata", "ownerReferences")
		uid, _, _ := unstructured.NestedString(ownerRef, "uid")
		for _, ref := range ownerRefs {
			if refMap, ok := ref.(map[string]interface{}); ok {
				if refMap["uid"] == uid {
					return nil // already present
				}
			}
		}

		ownerRefs = append(ownerRefs, ownerRef)
		if err := unstructured.SetNestedSlice(existing.Object, ownerRefs, "metadata", "ownerReferences"); err != nil {
			return status.Errorf(codes.Internal, "failed to set ownerReferences: %v", err)
		}
		// A conflict error here triggers a retry with a freshly-read object.
		if _, err := w.dynamicClient.Resource(ciliumNetworkPolicyGVR).Namespace(namespace).Update(ctx, existing, metav1.UpdateOptions{}); err != nil {
			return err
		}
		klog.V(3).Infof("Added ownerReference to CiliumNetworkPolicy %s", cnpName)
		return nil
	})
}
|
||||
|
||||
// removeCNPOwnerReference removes a VMI ownerReference from a CiliumNetworkPolicy.
// Deletes the CNP if no ownerReferences remain.
//
// Idempotent: a missing CNP is success. Matching is by owner name (not UID),
// and runs under RetryOnConflict to survive concurrent updates.
func (w *WrappedControllerService) removeCNPOwnerReference(ctx context.Context, namespace, cnpName, vmName string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		existing, err := w.dynamicClient.Resource(ciliumNetworkPolicyGVR).Namespace(namespace).Get(ctx, cnpName, metav1.GetOptions{})
		if err != nil {
			if errors.IsNotFound(err) {
				// Already gone — nothing to remove.
				return nil
			}
			return status.Errorf(codes.Internal, "failed to get CiliumNetworkPolicy %s: %v", cnpName, err)
		}

		// Filter out any ownerReference whose name matches this VMI.
		ownerRefs, _, _ := unstructured.NestedSlice(existing.Object, "metadata", "ownerReferences")
		var remaining []interface{}
		for _, ref := range ownerRefs {
			if refMap, ok := ref.(map[string]interface{}); ok {
				if refMap["name"] == vmName {
					continue
				}
			}
			remaining = append(remaining, ref)
		}

		if len(remaining) == 0 {
			// Last owner — delete CNP
			if err := w.dynamicClient.Resource(ciliumNetworkPolicyGVR).Namespace(namespace).Delete(ctx, cnpName, metav1.DeleteOptions{}); err != nil {
				// Concurrent deletion is fine; only surface other errors.
				if !errors.IsNotFound(err) {
					return status.Errorf(codes.Internal, "failed to delete CiliumNetworkPolicy %s: %v", cnpName, err)
				}
			}
			klog.V(3).Infof("Deleted CiliumNetworkPolicy %s (no more owners)", cnpName)
			return nil
		}

		if err := unstructured.SetNestedSlice(existing.Object, remaining, "metadata", "ownerReferences"); err != nil {
			return status.Errorf(codes.Internal, "failed to set ownerReferences: %v", err)
		}
		// A conflict error here triggers a retry with a freshly-read object.
		if _, err := w.dynamicClient.Resource(ciliumNetworkPolicyGVR).Namespace(namespace).Update(ctx, existing, metav1.UpdateOptions{}); err != nil {
			return err
		}
		klog.V(3).Infof("Removed VMI %s ownerReference from CiliumNetworkPolicy %s", vmName, cnpName)
		return nil
	})
}
|
||||
|
||||
func hasRWXAccessMode(pvc *corev1.PersistentVolumeClaim) bool {
|
||||
for _, mode := range pvc.Spec.AccessModes {
|
||||
if mode == corev1.ReadWriteMany {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getNFSExport extracts the NFS export URL from a PersistentVolume.
|
||||
// Supports both native NFS PVs and CSI PVs with nfs-export volume attribute.
|
||||
func getNFSExport(pv *corev1.PersistentVolume) (string, error) {
|
||||
if pv.Spec.NFS != nil {
|
||||
return fmt.Sprintf("nfs://%s:2049%s", pv.Spec.NFS.Server, pv.Spec.NFS.Path), nil
|
||||
}
|
||||
if pv.Spec.CSI != nil && pv.Spec.CSI.VolumeAttributes != nil {
|
||||
if export, ok := pv.Spec.CSI.VolumeAttributes["linstor.csi.linbit.com/nfs-export"]; ok {
|
||||
return export, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no NFS export info found in PV %s", pv.Name)
|
||||
}
|
||||
|
||||
// parseNFSExport parses an NFS URL of the form nfs://host:port/path.
// A missing port defaults to "2049" and a missing path defaults to "/".
func parseNFSExport(nfsURL string) (host, port, path string, err error) {
	u, parseErr := url.Parse(nfsURL)
	if parseErr != nil {
		return "", "", "", fmt.Errorf("failed to parse NFS URL %q: %w", nfsURL, parseErr)
	}

	host, port, path = u.Hostname(), u.Port(), u.Path
	if port == "" {
		port = "2049"
	}
	if path == "" {
		path = "/"
	}
	return host, port, path, nil
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user