Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-29 02:18:47 +00:00)

Compare commits: 90 commits
Comparing 802-apps-a...ingress-ap
Commits (SHA1):
102ec2a6f2, b55db668d1, 49984e64a0, 7897190c3f, 29b49496f2, 3c27192d3e, dca732cde0, 0346dc05bb, a03cdeff04, 062d72805a,
70fed8148d, 12c6df83f5, f61a7817e6, c482289b14, 1e59e5fbb6, 6106a9fe51, ec9e26c054, 108fc647ea, a9b235048d, e1c14619d2,
f644bf20c5, 93bdf41144, bacf15f037, 9239852ec8, 87a286fc74, 6d253b937b, 255176c321, fa341deaac, f08566d3f1, a29040faf7,
637551eb33, 58d959b305, fcc7056e5c, 5d7e56bffe, 69b3ddf717, 79b5c6b5af, 076128c783, 894cb14d49, a0935e9ae4, f2c248acbd,
590f14a614, 4c8dba880a, de0c7b94f4, 2682a6e674, e3e0b21612, 455d66fbe4, 7db7277636, 7be5db8cff, 249950d94b, 44565dca88,
cefcd24ebb, 13d7df47d7, 1ccd3074dc, 70d3591ed2, 700991f4fa, d89acbf44d, 59ef3296f0, 3ed0cdee1c, 9f5230a342, b895ccfdeb,
d54a407d68, f9ec630509, 3f47181c10, 19409d801d, 8a4793d571, 0fc3fdcb3d, 04e2b3952b, b56624a781, 07d7fadb1a, 8db92d53d1,
7537235f43, 4bb524e53d, e7ded52f93, 8547dc3b21, c22603bf7e, 89525dedb5, 1c53a6f9f6, 16ee0f2c3a, 72d0394475, 0a998c8b49,
7bfad655c2, e81cbf780c, e8cc44450a, d3a8a4a7de, fc2c5a0f6b, 0f8b8e1744, 703073a164, 6a0fc64475, 63ebab5c2a, 0ddaff9380
.github/workflows/backport.yaml (53 lines, vendored, new file)
@@ -0,0 +1,53 @@
name: Automatic Backport

on:
  pull_request_target:
    types: [closed] # fires when PR is closed (merged)

concurrency:
  group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

permissions:
  contents: write
  pull-requests: write

jobs:
  backport:
    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'backport')
    runs-on: [self-hosted]

    steps:
      # 1. Decide which maintenance branch should receive the back‑port
      - name: Determine target maintenance branch
        id: target
        uses: actions/github-script@v7
        with:
          script: |
            let rel;
            try {
              rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
            } catch (e) {
              core.setFailed('No existing releases found; cannot determine backport target.');
              return;
            }
            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
            const branch = `release-${maj}.${min}`;
            core.setOutput('branch', branch);
            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);

      # 2. Checkout (required by backport‑action)
      - name: Checkout repository
        uses: actions/checkout@v4

      # 3. Create the back‑port pull request
      - name: Create back‑port PR
        uses: korthout/backport-action@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          label_pattern: '' # don't read labels for targets
          target_branches: ${{ steps.target.outputs.branch }}
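The workflow above resolves the backport target from the latest published release tag: it strips the leading `v` and keeps only the major and minor components. A minimal shell sketch of the same derivation (illustrative only; the tag value is hypothetical, and the workflow itself does this in JavaScript via actions/github-script):

```sh
#!/bin/sh
# Derive the maintenance branch from a release tag, e.g. v0.31.5 -> release-0.31
tag="v0.31.5"                 # hypothetical latest release tag
version="${tag#v}"            # 0.31.5
major_minor="${version%.*}"   # 0.31
echo "release-${major_minor}" # release-0.31
```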
.github/workflows/pre-commit.yml (11 lines, vendored)
@@ -1,12 +1,13 @@
name: Pre-Commit Checks

on:
  push:
    branches:
      - main
  pull_request:
    paths-ignore:
      - '**.md'
    types: [labeled, opened, synchronize, reopened]

concurrency:
  group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  pre-commit:
    runs-on: ubuntu-22.04
.github/workflows/pull-requests-release.yaml (136 lines, vendored)
@@ -4,6 +4,10 @@ on:
  pull_request:
    types: [labeled, opened, synchronize, reopened, closed]

concurrency:
  group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  verify:
    name: Test Release

@@ -12,8 +16,8 @@ jobs:
      contents: read
      packages: write

    # Run only when the PR carries the "release" label and not closed.
    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

@@ -39,38 +43,112 @@ jobs:
    runs-on: [self-hosted]
    permissions:
      contents: write

    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      # Extract tag from branch name (branch = release-X.Y.Z*)
      - name: Extract tag from branch name
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const match = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);

            if (!match) {
              core.setFailed(`Branch '${branch}' does not match expected format 'release-X.Y.Z[-suffix]'`);
            } else {
              const tag = `v${match[1]}`;
              core.setOutput('tag', tag);
              console.log(`✅ Extracted tag: ${tag}`);
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }

            const tag = `v${m[1]}`;
            core.setOutput('tag', tag);
            console.log(`✅ Tag to publish: ${tag}`);

      # Checkout repo & create / push annotated tag
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create tag on merged commit
      - name: Create tag on merge commit
        run: |
          git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }} --force
          git push origin ${{ steps.get_tag.outputs.tag }} --force

          git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
          git push -f origin ${{ steps.get_tag.outputs.tag }}

      # Ensure maintenance branch release-X.Y
      - name: Ensure maintenance branch release-X.Y
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
            const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
            if (!match) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
              return;
            }
            const line = `${match[1]}.${match[2]}`;
            const branch = `release-${line}`;
            try {
              await github.rest.repos.getBranch({
                owner: context.repo.owner,
                repo: context.repo.repo,
                branch
              });
              console.log(`Branch '${branch}' already exists`);
            } catch (_) {
              await github.rest.git.createRef({
                owner: context.repo.owner,
                repo: context.repo.repo,
                ref: `refs/heads/${branch}`,
                sha: context.sha
              });
              console.log(`✅ Branch '${branch}' created at ${context.sha}`);
            }

      # Get the latest published release
      - name: Get the latest published release
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare current tag vs latest using semver-utils
      - name: Semver compare
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.get_tag.outputs.tag }}
          compare-to: ${{ steps.latest_release.outputs.tag }}

      # Derive flags: prerelease? make_latest?
      - name: Calculate publish flags
        id: flags
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
            const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/);
            if (!m) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc.1
            const isRc = Boolean(m[2]);
            core.setOutput('is_rc', isRc);
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');

      # Publish draft release with correct flags
      - name: Publish draft release
        uses: actions/github-script@v7
        with:

@@ -78,19 +156,17 @@ jobs:
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            const release = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!release) {
              throw new Error(`Draft release with tag ${tag} not found`);
            }

            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!draft) throw new Error(`Draft release for ${tag} not found`);
            await github.rest.repos.updateRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
              release_id: release.id,
              draft: false
              owner: context.repo.owner,
              repo: context.repo.repo,
              release_id: draft.id,
              draft: false,
              prerelease: ${{ steps.flags.outputs.is_rc }},
              make_latest: '${{ steps.flags.outputs.make_latest }}'
            });

            console.log(`✅ Published release for ${tag}`);

            console.log(`🚀 Published release for ${tag}`);
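The `get_tag` step above derives the tag to publish from the PR's head branch name, which is expected to look like `release-X.Y.Z` with an optional suffix. A rough shell equivalent of that mapping (illustrative only; the branch name is hypothetical and the real step applies the stricter regex shown above):

```sh
#!/bin/sh
# release-X.Y.Z[-suffix] -> vX.Y.Z[-suffix]
branch="release-0.42.1-rc.2"   # hypothetical PR head branch
case "$branch" in
  release-*) echo "v${branch#release-}" ;;                               # prints v0.42.1-rc.2
  *) echo "branch '$branch' does not look like release-X.Y.Z" >&2; exit 1 ;;
esac
```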
.github/workflows/pull-requests.yaml (16 lines, vendored)
@@ -4,6 +4,10 @@ on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

concurrency:
  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  e2e:
    name: Build and Test

@@ -12,8 +16,8 @@ jobs:
      contents: read
      packages: write

    # Never run when the PR carries the "release" label.
    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:

@@ -30,10 +34,8 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: make build
        run: |
          make build
      - name: Build
        run: make build

      - name: make test
        run: |
          make test
      - name: Test
        run: make test
.github/workflows/tags.yaml (230 lines, vendored)
@@ -1,10 +1,15 @@
name: Versioned Tag

on:
  # Trigger on push if it includes a tag like vX.Y.Z
  push:
    tags:
      - 'v*.*.*'
      - 'v*.*.*'      # vX.Y.Z
      - 'v*.*.*-rc.*' # vX.Y.Z-rc.N

concurrency:
  group: tags-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  prepare-release:

@@ -14,9 +19,10 @@ jobs:
      contents: write
      packages: write
      pull-requests: write
      actions: write

    steps:
      # 1) Check if a non-draft release with this tag already exists
      # Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7

@@ -25,57 +31,67 @@ jobs:
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
            if (existing) {
              core.setOutput('skip', 'true');
            } else {
              core.setOutput('skip', 'false');
            }
            const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
            core.setOutput('skip', exists);
            console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);

      # If a published release already exists, skip the rest of the workflow
      - name: Skip if release already exists
        if: steps.check_release.outputs.skip == 'true'
        run: echo "Release already exists, skipping workflow."

      # 2) Determine the base branch from which the tag was pushed
      # Parse tag meta‑data (rc?, maintenance line, etc.)
      - name: Parse tag
        if: steps.check_release.outputs.skip == 'false'
        id: tag
        uses: actions/github-script@v7
        with:
          script: |
            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
            const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/); // ['0.31.5', '-rc.1']
            if (!m) {
              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc.1
            const isRc = Boolean(m[2]);
            const [maj, min] = m[1].split('.');
            core.setOutput('tag', ref);              // v0.31.5-rc.1
            core.setOutput('version', version);      // 0.31.5-rc.1
            core.setOutput('is_rc', isRc);           // true
            core.setOutput('line', `${maj}.${min}`); // 0.31

      # Detect base branch (main or release‑X.Y) the tag was pushed from
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
        uses: actions/github-script@v7
        with:
          script: |
            /*
              For a push event with a tag, GitHub sets context.payload.base_ref
              if the tag was pushed from a branch.
              If it's empty, we can't determine the correct base branch and must fail.
            */
            const baseRef = context.payload.base_ref;
            if (!baseRef) {
              core.setFailed(`❌ base_ref is empty. Make sure you push the tag from a branch (e.g. 'git push origin HEAD:refs/tags/vX.Y.Z').`);
              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
              return;
            }

            const shortBranch = baseRef.replace("refs/heads/", "");
            const releasePattern = /^release-\d+\.\d+$/;
            if (shortBranch !== "main" && !releasePattern.test(shortBranch)) {
              core.setFailed(`❌ Tagged commit must belong to branch 'main' or 'release-X.Y'. Got '${shortBranch}'`);
            const branch = baseRef.replace('refs/heads/', '');
            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
            if (!ok) {
              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
              return;
            }
            core.setOutput('branch', branch);

            core.setOutput('branch', shortBranch);

      # 3) Checkout full git history and tags
      # Checkout & login once
      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      # 4) Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
      - name: Login to GHCR
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:

@@ -83,113 +99,129 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      # 5) Build project artifacts
      # Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build

      # 6) Optionally commit built artifacts to the repository
      # Commit built artifacts
      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        env:
          GIT_AUTHOR_NAME: ${{ github.actor }}
          GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
          git push origin HEAD || true

      # 7) Create a release branch like release-X.Y.Z
      # Get `latest_version` from latest published release
      - name: Get latest published release
        if: steps.check_release.outputs.skip == 'false'
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare tag (A) with latest (B)
      - name: Semver compare
        if: steps.check_release.outputs.skip == 'false'
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.tag.outputs.tag }} # A
          compare-to: ${{ steps.latest_release.outputs.tag }} # B

      # Create or reuse DRAFT GitHub Release
      - name: Create / reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.tag.outputs.tag }}';
            const isRc = ${{ steps.tag.outputs.is_rc }};
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            const makeLatest = outdated ? false : 'legacy';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            let rel = releases.data.find(r => r.tag_name === tag);
            if (!rel) {
              rel = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: tag,
                draft: true,
                prerelease: isRc,
                make_latest: makeLatest
              });
              console.log(`Draft release created for ${tag}`);
            } else {
              console.log(`Re‑using existing release ${tag}`);
            }
            core.setOutput('upload_url', rel.upload_url);

      # Build + upload assets (optional)
      - name: Build & upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: |
          make assets
          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Create release‑X.Y.Z branch and push (force‑update)
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
          BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH_NAME"
          git push origin "$BRANCH_NAME" --force
          BRANCH="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH"
          git push -f origin "$BRANCH"

      # 8) Create a pull request from release-X.Y.Z to the original base branch
      # Create pull request into original base branch (if absent)
      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              base
            });

            if (prs.data.length === 0) {
              const newPr = await github.rest.pulls.create({
              const pr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                repo: context.repo.repo,
                head,
                base,
                title: `Release v${version}`,
                body:
                  `This PR prepares the release \`v${version}\`.\n` +
                  `(Please merge it before releasing draft)`,
                body: `This PR prepares the release \`v${version}\`.`,
                draft: false
              });

              console.log(`Created pull request #${newPr.data.number} from ${head} to ${base}`);
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: newPr.data.number,
                repo: context.repo.repo,
                issue_number: pr.data.number,
                labels: ['release']
              });
              console.log(`Created PR #${pr.data.number}`);
            } else {
              console.log(`Pull request already exists from ${head} to ${base}`);
              console.log(`PR already exists from ${head} to ${base}`);
            }

      # 9) Create or reuse an existing draft GitHub release for this tag
      - name: Create or reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: create_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            let release = releases.data.find(r => r.tag_name === tag);
            if (!release) {
              release = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: `${tag}`,
                draft: true,
                prerelease: false
              });
            }
            core.setOutput('upload_url', release.upload_url);

      # 10) Build additional assets for the release (if needed)
      - name: Build assets
        if: steps.check_release.outputs.skip == 'false'
        run: make assets
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # 11) Upload assets to the draft release
      - name: Upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # 12) Run tests
      - name: Run tests
        if: steps.check_release.outputs.skip == 'false'
        run: make test
.gitignore (3 lines, vendored)
@@ -1,6 +1,7 @@
_out
.git
.idea
.vscode

# User-specific stuff
.idea/**/workspace.xml

@@ -75,4 +76,4 @@ fabric.properties
.idea/caches/build_file_checksums.ser

.DS_Store
**/.DS_Store
Makefile (1 line)
@@ -47,7 +47,6 @@ assets:
test:
    make -C packages/core/testing apply
    make -C packages/core/testing test
    #make -C packages/core/testing test-applications

generate:
    hack/update-codegen.sh

@@ -39,6 +39,8 @@ import (
    cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
    "github.com/cozystack/cozystack/internal/controller"
    "github.com/cozystack/cozystack/internal/telemetry"

    helmv2 "github.com/fluxcd/helm-controller/api/v2"
    // +kubebuilder:scaffold:imports
)

@@ -51,6 +53,7 @@ func init() {
    utilruntime.Must(clientgoscheme.AddToScheme(scheme))

    utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
    utilruntime.Must(helmv2.AddToScheme(scheme))
    // +kubebuilder:scaffold:scheme
}

@@ -182,6 +185,14 @@ func main() {
    if err = (&controller.WorkloadReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }).SetupWithManager(mgr); err != nil {
        setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
        os.Exit(1)
    }

    if err = (&controller.TenantHelmReconciler{
        Client: mgr.GetClient(),
        Scheme: mgr.GetScheme(),
    }).SetupWithManager(mgr); err != nil {
        setupLog.Error(err, "unable to create controller", "controller", "Workload")
        os.Exit(1)
@@ -1,10 +1,37 @@
# Release Workflow

This section explains how Cozystack builds and releases are made.
This document describes Cozystack's release process.

## Introduction

Cozystack uses a staged release process to ensure stability and flexibility during development.

There are three types of releases:

- **Release Candidates (RC)** – Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
- **Regular Releases** – Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
- **Patch Releases** – Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.

Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.

## Release Candidates

Release candidates are Cozystack versions that introduce new features and are published before a stable release.
Their purpose is to help validate stability before finalizing a new feature release.
They allow for final rounds of testing and bug fixes without freezing development.

Release candidates are given numbers `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
They are created directly in the `main` branch.
An RC is typically tagged when all major features for the upcoming release have been merged into main and the release enters its testing phase.
However, new features and changes can still be added before the regular release `vX.Y.0`.

Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in main.
This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.
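For illustration, tagging a release candidate could look like this (the version number is hypothetical; the tag is pushed from `main` so that the tags workflow can resolve `base_ref`, as its own error message suggests):

```sh
# Tag the current tip of main as a release candidate and push it from the branch
git checkout main
git pull --ff-only
git tag v0.42.0-rc.1
git push origin HEAD:refs/tags/v0.42.0-rc.1
```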
## Regular Releases

When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
@@ -1,165 +0,0 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'


ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"

values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"

function delete_hr() {
  local release_name="$1"
  local namespace="$2"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ "$release_name" == "tenant-e2e" ]]; then
    echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
    return 0
  fi

  kubectl delete helmrelease $release_name -n $namespace
}

function install_helmrelease() {
  local release_name="$1"
  local namespace="$2"
  local chart_path="$3"
  local repo_name="$4"
  local repo_ns="$5"
  local values_file="$6"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$chart_path" ]]; then
    echo -e "${RED}Error: Chart path name is required.${RESET}"
    exit 1
  fi

  if [[ -n "$values_file" && -f "$values_file" ]]; then
    local values_section
    values_section=$(echo "  values:" && sed 's/^/    /' "$values_file")
  fi

  local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
  {
    echo "apiVersion: helm.toolkit.fluxcd.io/v2"
    echo "kind: HelmRelease"
    echo "metadata:"
    echo "  labels:"
    echo "    cozystack.io/ui: \"true\""
    echo "  name: \"$release_name\""
    echo "  namespace: \"$namespace\""
    echo "spec:"
    echo "  chart:"
    echo "    spec:"
    echo "      chart: \"$chart_path\""
    echo "      reconcileStrategy: Revision"
    echo "      sourceRef:"
    echo "        kind: HelmRepository"
    echo "        name: \"$repo_name\""
    echo "        namespace: \"$repo_ns\""
    echo "      version: '*'"
    echo "  interval: 1m0s"
    echo "  timeout: 5m0s"
    [[ -n "$values_section" ]] && echo "$values_section"
  } > "$helmrelease_file"

  kubectl apply -f "$helmrelease_file"

  rm -f "$helmrelease_file"
}

function install_tenant (){
  local release_name="$1"
  local namespace="$2"
  local values_file="${values_base_path}tenant/values.yaml"
  local repo_name="cozystack-apps"
  local repo_ns="cozy-public"
  install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}

function make_extra_checks(){
  local checks_file="$1"
  echo "after exec make $checks_file"
  if [[ -n "$checks_file" && -f "$checks_file" ]]; then
    echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"

  fi
}

function check_helmrelease_status() {
  local release_name="$1"
  local namespace="$2"
  local checks_file="$3"
  local timeout=300 # Timeout in seconds
  local interval=5  # Interval between checks in seconds
  local elapsed=0


  while [[ $elapsed -lt $timeout ]]; do
    local status_output
    status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')

    if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
      echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
      make_extra_checks "$checks_file"
      delete_hr $release_name $namespace
      return 0
    elif [[ "$status_output" == "InstallFailed" ]]; then
      echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
      exit 1
    else
      echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
    fi

    sleep "$interval"
    elapsed=$((elapsed + interval))
  done

  echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
  exit 1
}

chart_name="$1"

if [ -z "$chart_name" ]; then
  echo -e "${RED}No chart name provided. Exiting...${RESET}"
  exit 1
fi


checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"

install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"

echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"

install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file
hack/e2e.sh (21 lines)
@@ -60,7 +60,8 @@ done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
    -O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi

@@ -85,7 +86,8 @@ done
# Start VMs
for i in 1 2 3; do
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
    -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
    -drive file=srv$i/data.img,if=virtio,format=raw \

@@ -121,7 +123,7 @@ machine:
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.grpc.v1.cri"]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

@@ -231,11 +233,18 @@ timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'

sleep 5

# Wait for all HelmReleases to be installed
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

failed_hrs=$(kubectl get hr -A | grep -v True)
if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
  printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
  exit 1
fi

# Wait for Cluster-API providers
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller

@@ -357,5 +366,5 @@ kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
  "oidc-enabled": "true"
}}'

timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
hack/testdata/http-cache/check.sh (1 line, vendored)
@@ -1 +0,0 @@
return 0

hack/testdata/http-cache/values.yaml (2 lines, vendored)
@@ -1,2 +0,0 @@
endpoints:
  - 8.8.8.8:443

hack/testdata/kubernetes/check.sh (1 line, vendored)
@@ -1 +0,0 @@
return 0

hack/testdata/kubernetes/values.yaml (62 lines, vendored)
@@ -1,62 +0,0 @@
## @section Common parameters

## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes contorl-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
  replicas: 2
storageClass: replicated

## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    instanceType: "u1.medium"
    ephemeralStorage: 20Gi
    roles:
      - ingress-nginx

    resources:
      cpu: ""
      memory: ""

## @section Cluster Addons
##
addons:

  ## Cert-manager: automatically creates and manages SSL/TLS certificate
  ##
  certManager:
    ## @param addons.certManager.enabled Enables the cert-manager
    ## @param addons.certManager.valuesOverride Custom values to override
    enabled: true
    valuesOverride: {}

  ## Ingress-NGINX Controller
  ##
  ingressNginx:
    ## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
    ## @param addons.ingressNginx.valuesOverride Custom values to override
    ##
    enabled: true
    ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
    ## e.g:
    ##  hosts:
    ##  - example.org
    ##  - foo.example.net
    ##
    hosts: []
    valuesOverride: {}

  ## Flux CD
  ##
  fluxcd:
    ## @param addons.fluxcd.enabled Enables Flux CD
    ## @param addons.fluxcd.valuesOverride Custom values to override
    ##
    enabled: true
    valuesOverride: {}

hack/testdata/nats/check.sh (1 line, vendored)
@@ -1 +0,0 @@
return 0

hack/testdata/nats/values.yaml (10 lines, vendored)
@@ -1,10 +0,0 @@

## @section Common parameters

## @param external Enable external access from outside the cluster
## @param replicas Persistent Volume size for NATS
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""

hack/testdata/tenant/check.sh (1 line, vendored)
@@ -1 +0,0 @@
return 0

hack/testdata/tenant/values.yaml (6 lines, vendored)
@@ -1,6 +0,0 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true
internal/controller/tenant_helm_reconciler.go (158 lines, new file)
@@ -0,0 +1,158 @@
package controller

import (
    "context"
    "fmt"
    "strings"
    "time"

    e "errors"

    helmv2 "github.com/fluxcd/helm-controller/api/v2"
    "gopkg.in/yaml.v2"
    corev1 "k8s.io/api/core/v1"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"
)

type TenantHelmReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := log.FromContext(ctx)

    hr := &helmv2.HelmRelease{}
    if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
        if errors.IsNotFound(err) {
            return ctrl.Result{}, nil
        }
        logger.Error(err, "unable to fetch HelmRelease")
        return ctrl.Result{}, err
    }

    if !strings.HasPrefix(hr.Name, "tenant-") {
        return ctrl.Result{}, nil
    }

    if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
        return ctrl.Result{}, nil
    }

    if len(hr.Status.History) == 0 {
        logger.Info("no history in HelmRelease status", "name", hr.Name)
        return ctrl.Result{}, nil
    }

    if hr.Status.History[0].Status != "deployed" {
        return ctrl.Result{}, nil
    }

    newDigest := hr.Status.History[0].Digest
    var hrList helmv2.HelmReleaseList
    childNamespace := getChildNamespace(hr.Namespace, hr.Name)
    if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
        if hr.Spec.Values == nil {
            logger.Error(e.New("hr.Spec.Values is nil"), "cant annotate tenant-root ns")
            return ctrl.Result{}, nil
        }
        err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
        if err != nil {
            logger.Error(err, "cant annotate tenant-root ns")
            return ctrl.Result{}, nil
        }
        logger.Info("namespace 'tenant-root' annotated")
    }

    if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
        logger.Error(err, "unable to list HelmReleases in namespace", "namespace", hr.Name)
        return ctrl.Result{}, err
    }

    for _, item := range hrList.Items {
        if item.Name == hr.Name {
            continue
        }
        oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
        if oldDigest == newDigest {
            continue
        }
        patchTarget := item.DeepCopy()

        if patchTarget.Annotations == nil {
            patchTarget.Annotations = map[string]string{}
        }
        ts := time.Now().Format(time.RFC3339Nano)

        patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
        patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
        patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts

        patch := client.MergeFrom(item.DeepCopy())
        if err := r.Patch(ctx, patchTarget, patch); err != nil {
            logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
            continue
        }

        logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
    }

    return ctrl.Result{}, nil
}

func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&helmv2.HelmRelease{}).
        Complete(r)
}

func getChildNamespace(currentNamespace, hrName string) string {
    tenantName := strings.TrimPrefix(hrName, "tenant-")

    switch {
    case currentNamespace == "tenant-root" && hrName == "tenant-root":
        // 1) root tenant inside root namespace
        return "tenant-root"

    case currentNamespace == "tenant-root":
        // 2) any other tenant in root namespace
        return fmt.Sprintf("tenant-%s", tenantName)

    default:
        // 3) tenant in a dedicated namespace
        return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
    }
}

func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
    var data map[string]interface{}
    if err := yaml.Unmarshal(values.Raw, &data); err != nil {
        return fmt.Errorf("failed to parse HelmRelease values: %w", err)
    }

    host, ok := data["host"].(string)
    if !ok || host == "" {
        return fmt.Errorf("host field not found or not a string")
    }

    var ns corev1.Namespace
    if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
        return fmt.Errorf("failed to get namespace tenant-root: %w", err)
    }

    if ns.Annotations == nil {
        ns.Annotations = map[string]string{}
    }
    ns.Annotations["namespace.cozystack.io/host"] = host

    if err := c.Update(context.TODO(), &ns); err != nil {
        return fmt.Errorf("failed to update namespace: %w", err)
    }

    return nil
}
@@ -39,6 +39,15 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
    }

    t := getMonitoredObject(w)

    if t == nil {
        err = r.Delete(ctx, w)
        if err != nil {
            logger.Error(err, "failed to delete workload")
        }
        return ctrl.Result{}, err
    }

    err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

    // found object, nothing to do

@@ -68,20 +77,23 @@ func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
}

func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
    if strings.HasPrefix(w.Name, "pvc-") {
    switch {
    case strings.HasPrefix(w.Name, "pvc-"):
        obj := &corev1.PersistentVolumeClaim{}
        obj.Name = strings.TrimPrefix(w.Name, "pvc-")
        obj.Namespace = w.Namespace
        return obj
    }
    if strings.HasPrefix(w.Name, "svc-") {
    case strings.HasPrefix(w.Name, "svc-"):
        obj := &corev1.Service{}
        obj.Name = strings.TrimPrefix(w.Name, "svc-")
        obj.Namespace = w.Namespace
        return obj
    case strings.HasPrefix(w.Name, "pod-"):
        obj := &corev1.Pod{}
        obj.Name = strings.TrimPrefix(w.Name, "pod-")
        obj.Namespace = w.Namespace
        return obj
    }
    obj := &corev1.Pod{}
    obj.Name = w.Name
    obj.Namespace = w.Namespace
    var obj client.Object
    return obj
}
internal/controller/workload_controller_test.go (26 lines, new file)
@@ -0,0 +1,26 @@
package controller

import (
    "testing"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
    corev1 "k8s.io/api/core/v1"
)

func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "unprefixed-name"
    obj := getMonitoredObject(w)
    if obj != nil {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
    }
}

func TestPodMonitoredObject(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "pod-mypod"
    obj := getMonitoredObject(w)
    if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
    }
}
@@ -212,15 +212,12 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
) error {
    logger := log.FromContext(ctx)

    // Combine both init containers and normal containers to sum resources properly
    combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)

    // totalResources will store the sum of all container resource limits
    // totalResources will store the sum of all container resource requests
    totalResources := make(map[string]resource.Quantity)

    // Iterate over all containers to aggregate their Limits
    for _, container := range combinedContainers {
        for name, qty := range container.Resources.Limits {
    // Iterate over all containers to aggregate their requests
    for _, container := range pod.Spec.Containers {
        for name, qty := range container.Resources.Requests {
            if existing, exists := totalResources[name.String()]; exists {
                existing.Add(qty)
                totalResources[name.String()] = existing

@@ -249,7 +246,7 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(

    workload := &cozyv1alpha1.Workload{
        ObjectMeta: metav1.ObjectMeta{
            Name:      pod.Name,
            Name:      fmt.Sprintf("pod-%s", pod.Name),
            Namespace: pod.Namespace,
        },
    }
packages/apps/bucket/README.md (3 lines, new file)
@@ -0,0 +1,3 @@
# S3 bucket

## Parameters

@@ -11,7 +11,7 @@ spec:
      kind: HelmRepository
      name: cozystack-system
      namespace: cozy-system
    version: '*'
    version: '>= 0.0.0-0'
  interval: 1m0s
  timeout: 5m0s
  values:
packages/apps/bucket/values.schema.json (5 lines, new file)
@@ -0,0 +1,5 @@
{
  "title": "Chart Values",
  "type": "object",
  "properties": {}
}

packages/apps/bucket/values.yaml (1 line, new file)
@@ -0,0 +1 @@
{}
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:4e1f5153d2673a399b315252238f4dc3eb5d6c59295aef594691710cc5b72eb4
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.18.1
version: 0.20.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -1,4 +1,4 @@
UBUNTU_CONTAINER_DISK_TAG = v1.30.1
KUBERNETES_VERSION = v1.32
KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)

include ../../../scripts/common-envs.mk
@@ -6,21 +6,26 @@ include ../../../scripts/package.mk

generate:
    readme-generator -v values.yaml -s values.schema.json -r README.md
    yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
    yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
    yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
    yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json

image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler

image-ubuntu-container-disk:
    docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
        --provenance false \
        --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
        --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
        --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
        --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
        --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \
        --cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
        --cache-to type=inline \
        --metadata-file images/ubuntu-container-disk.json \
        --push=$(PUSH) \
        --label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
        --load=$(LOAD)
    echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
    echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
        > images/ubuntu-container-disk.tag
    rm -f images/ubuntu-container-disk.json
@@ -27,20 +27,47 @@ How to access to deployed cluster:
|
||||
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
|
||||
```
|
||||
|
||||
# Series
|
||||
## Parameters
|
||||
|
||||
<!-- source: https://github.com/kubevirt/common-instancetypes/blob/main/README.md -->
|
||||
### Common parameters
|
||||
|
||||
. | U | O | CX | M | RT
|
||||
----------------------------|-----|-----|------|-----|------
|
||||
*Has GPUs* | | | | |
|
||||
*Hugepages* | | | ✓ | ✓ | ✓
|
||||
*Overcommitted Memory* | | ✓ | | |
|
||||
*Dedicated CPU* | | | ✓ | | ✓
|
||||
*Burstable CPU performance* | ✓ | ✓ | | ✓ |
|
||||
*Isolated emulator threads* | | | ✓ | | ✓
|
||||
*vNUMA* | | | ✓ | | ✓
|
||||
*vCPU-To-Memory Ratio* | 1:4 | 1:4 | 1:2 | 1:8 | 1:4
|
||||
| Name | Description | Value |
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
|
||||
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
|
||||
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
|
||||
| `storageClass` | StorageClass used to store user data | `replicated` |
|
||||
| `nodeGroups` | nodeGroups configuration | `{}` |
|
||||
|
||||
### Cluster Addons

| Name                                          | Description                                                                                                                                                        | Value   |
| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| `addons.certManager.enabled`                  | Enables the cert-manager                                                                                                                                           | `false` |
| `addons.certManager.valuesOverride`           | Custom values to override                                                                                                                                          | `{}`    |
| `addons.cilium.valuesOverride`                | Custom values to override                                                                                                                                          | `{}`    |
| `addons.ingressNginx.enabled`                 | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)                                                                                           | `false` |
| `addons.ingressNginx.valuesOverride`          | Custom values to override                                                                                                                                          | `{}`    |
| `addons.ingressNginx.hosts`                   | List of domain names that should be passed through to the cluster by upper cluster                                                                                | `[]`    |
| `addons.gpuOperator.enabled`                  | Enables the gpu-operator                                                                                                                                           | `false` |
| `addons.gpuOperator.valuesOverride`           | Custom values to override                                                                                                                                          | `{}`    |
| `addons.fluxcd.enabled`                       | Enables Flux CD                                                                                                                                                    | `false` |
| `addons.fluxcd.valuesOverride`                | Custom values to override                                                                                                                                          | `{}`    |
| `addons.monitoringAgents.enabled`             | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
| `addons.monitoringAgents.valuesOverride`      | Custom values to override                                                                                                                                          | `{}`    |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override                                                                                                                                          | `{}`    |

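For instance, a tenant cluster could enable a couple of the addons listed above and tune one of them through `valuesOverride`. This is a sketch only: the domain is a placeholder, and the override assumes the keys follow the upstream ingress-nginx chart layout wrapped under `ingress-nginx:` as in the chart's defaults.

```yaml
addons:
  certManager:
    enabled: true
  ingressNginx:
    enabled: true
    hosts:
      - example.org                         # hypothetical domain passed through by the upper cluster
    valuesOverride:
      ingress-nginx:
        controller:
          config:
            use-forwarded-headers: "true"   # assumed upstream ingress-nginx option
  monitoringAgents:
    enabled: true
```
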
### Kubernetes control plane configuration

| Name                                               | Description                                                                                                                                                                                                        | Value   |
| -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `controlPlane.apiServer.resourcesPreset`           | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `controlPlane.apiServer.resources`                 | Resources                                                                                                                                                                                                          | `{}`    |
| `controlPlane.controllerManager.resources`         | Resources                                                                                                                                                                                                          | `{}`    |
| `controlPlane.controllerManager.resourcesPreset`   | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resourcesPreset`           | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resources`                 | Resources                                                                                                                                                                                                          | `{}`    |
| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.konnectivity.server.resources`       | Resources                                                                                                                                                                                                          | `{}`    |

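As a sketch only, presets and explicit resources can be mixed per component; explicit `resources` take precedence over `resourcesPreset`, and the request/limit figures below are illustrative, mirroring the commented examples in the chart's values.yaml.

```yaml
controlPlane:
  replicas: 3
  apiServer:
    resourcesPreset: medium     # one of: none, nano, micro, small, medium, large, xlarge, 2xlarge
  controllerManager:
    resources:                  # explicit resources win over resourcesPreset
      requests:
        cpu: 100m
        memory: 512Mi
      limits:
        cpu: 1000m
        memory: 1Gi
```
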

## U Series

@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.19.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
|
||||
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.19.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.19.0@sha256:5717919c75e609902c6d67138311a2a8fd07be822e2173f3802b67cf5f3486e9
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a
|
||||
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:4a4f8bee150e04d1efcd5ff1ea83e12f495a98851cc5fd47ef41ac7aebce9b74
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04
|
||||
FROM ubuntu:22.04 as guestfish
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
@@ -5,6 +6,7 @@ RUN apt-get update \
|
||||
&& apt-get -y install \
|
||||
libguestfs-tools \
|
||||
linux-image-generic \
|
||||
wget \
|
||||
make \
|
||||
bash-completion \
|
||||
&& apt-get clean
|
||||
@@ -13,7 +15,10 @@ WORKDIR /build
|
||||
|
||||
FROM guestfish as builder
|
||||
|
||||
RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
|
||||
# noble is a code name for the Ubuntu 24.04 LTS release
|
||||
RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
|
||||
|
||||
ARG KUBERNETES_VERSION
|
||||
|
||||
RUN qemu-img resize image.img 5G \
|
||||
&& eval "$(guestfish --listen --network)" \
|
||||
@@ -26,8 +31,8 @@ RUN qemu-img resize image.img 5G \
|
||||
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
|
||||
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
|
||||
# kubernetes repo
|
||||
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
|
||||
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
|
||||
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
|
||||
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
|
||||
# install containerd
|
||||
&& guestfish --remote command "apt-get update -y" \
|
||||
&& guestfish --remote command "apt-get install -y containerd.io" \
|
||||
|
||||
@@ -39,6 +39,13 @@ spec:
|
||||
sockets: 1
|
||||
{{- end }}
|
||||
devices:
|
||||
{{- if .group.gpus }}
|
||||
gpus:
|
||||
{{- range $i, $gpu := .group.gpus }}
|
||||
- name: gpu{{ add $i 1 }}
|
||||
deviceName: {{ $gpu.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
disks:
|
||||
- name: system
|
||||
disk:
|
||||
@@ -103,22 +110,22 @@ metadata:
|
||||
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
|
||||
spec:
|
||||
apiServer:
|
||||
{{- if .Values.kamajiControlPlane.apiServer.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.apiServer.resources | nindent 6 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.apiServer.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- if .Values.controlPlane.apiServer.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.apiServer.resources | nindent 6 }}
|
||||
{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- end }}
|
||||
controllerManager:
|
||||
{{- if .Values.kamajiControlPlane.controllerManager.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.controllerManager.resources | nindent 6 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.controllerManager.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- if .Values.controlPlane.controllerManager.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.controllerManager.resources | nindent 6 }}
|
||||
{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- end }}
|
||||
scheduler:
|
||||
{{- if .Values.kamajiControlPlane.scheduler.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.scheduler.resources | nindent 6 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.scheduler.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- if .Values.controlPlane.scheduler.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.scheduler.resources | nindent 6 }}
|
||||
{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- end }}
|
||||
dataStoreName: "{{ $etcd }}"
|
||||
addons:
|
||||
@@ -128,10 +135,10 @@ spec:
|
||||
konnectivity:
|
||||
server:
|
||||
port: 8132
|
||||
{{- if .Values.kamajiControlPlane.addons.konnectivity.server.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.addons.konnectivity.server.resources | nindent 10 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||
{{- if .Values.controlPlane.konnectivity.server.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.konnectivity.server.resources | nindent 10 }}
|
||||
{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||
{{- end }}
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
@@ -276,7 +283,7 @@ spec:
|
||||
kind: KubevirtMachineTemplate
|
||||
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
|
||||
namespace: {{ $.Release.Namespace }}
|
||||
version: v1.30.1
|
||||
version: v1.32.3
|
||||
---
|
||||
apiVersion: cluster.x-k8s.io/v1beta1
|
||||
kind: MachineHealthCheck
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-cert-manager-crds
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cert-manager-crds
|
||||
@@ -16,6 +16,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-cert-manager
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cert-manager
|
||||
@@ -17,6 +17,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
@@ -30,11 +31,9 @@ spec:
|
||||
upgrade:
|
||||
remediation:
|
||||
retries: -1
|
||||
{{- if .Values.addons.certManager.valuesOverride }}
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-cert-manager-values-override
|
||||
valuesKey: values
|
||||
{{- with .Values.addons.certManager.valuesOverride }}
|
||||
values:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
dependsOn:
|
||||
@@ -47,13 +46,3 @@ spec:
|
||||
- name: {{ .Release.Name }}-cert-manager-crds
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
{{- if .Values.addons.certManager.valuesOverride }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-cert-manager-values-override
|
||||
stringData:
|
||||
values: |
|
||||
{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,10 +1,19 @@
|
||||
{{- define "cozystack.defaultCiliumValues" -}}
|
||||
cilium:
|
||||
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
|
||||
k8sServicePort: 6443
|
||||
routingMode: tunnel
|
||||
enableIPv4Masquerade: true
|
||||
ipv4NativeRoutingCIDR: ""
|
||||
{{- end }}
|
||||
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-cilium
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cilium
|
||||
@@ -16,6 +25,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
@@ -30,12 +40,7 @@ spec:
|
||||
remediation:
|
||||
retries: -1
|
||||
values:
|
||||
cilium:
|
||||
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
|
||||
k8sServicePort: 6443
|
||||
routingMode: tunnel
|
||||
enableIPv4Masquerade: true
|
||||
ipv4NativeRoutingCIDR: ""
|
||||
{{- toYaml (deepCopy .Values.addons.cilium.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultCiliumValues" .))) | nindent 4 }}
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
- name: {{ .Release.Name }}
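
The hunk above replaces the inlined Cilium values with a `cozystack.defaultCiliumValues` helper whose output is merged with the user-supplied `addons.cilium.valuesOverride` via `mergeOverwrite`; the same pattern is applied to the ingress-nginx and vertical-pod-autoscaler releases below. A rough sketch of the effect, with hypothetical override values:

```yaml
# user-supplied values (hypothetical):
addons:
  cilium:
    valuesOverride:
      cilium:
        routingMode: native
        ipv4NativeRoutingCIDR: "10.244.0.0/16"

# effective values rendered into the HelmRelease: the defaults with only the overridden keys replaced
cilium:
  k8sServiceHost: <release-name>.<release-namespace>.svc
  k8sServicePort: 6443
  routingMode: native
  enableIPv4Masquerade: true
  ipv4NativeRoutingCIDR: "10.244.0.0/16"
```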
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-csi
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: csi
|
||||
@@ -16,6 +16,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
|
||||
@@ -20,7 +20,7 @@ spec:
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: docker.io/clastix/kubectl:v1.30.1
|
||||
image: docker.io/clastix/kubectl:v1.32
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
@@ -38,6 +38,7 @@ spec:
|
||||
{{ .Release.Name }}-ingress-nginx
|
||||
{{ .Release.Name }}-fluxcd-operator
|
||||
{{ .Release.Name }}-fluxcd
|
||||
{{ .Release.Name }}-gpu-operator
|
||||
-p '{"spec": {"suspend": true}}'
|
||||
--type=merge --field-manager=flux-client-side-apply || true
|
||||
---
|
||||
@@ -76,6 +77,7 @@ rules:
|
||||
- {{ .Release.Name }}-ingress-nginx
|
||||
- {{ .Release.Name }}-fluxcd-operator
|
||||
- {{ .Release.Name }}-fluxcd
|
||||
- {{ .Release.Name }}-gpu-operator
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-fluxcd-operator
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: fluxcd-operator
|
||||
@@ -17,6 +17,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
@@ -49,7 +50,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-fluxcd
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: fluxcd
|
||||
@@ -61,6 +62,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-kubeconfig
|
||||
@@ -73,11 +75,9 @@ spec:
|
||||
upgrade:
|
||||
remediation:
|
||||
retries: -1
|
||||
{{- if .Values.addons.fluxcd.valuesOverride }}
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-fluxcd-values-override
|
||||
valuesKey: values
|
||||
{{- with .Values.addons.fluxcd.valuesOverride }}
|
||||
values:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
@@ -89,14 +89,3 @@ spec:
|
||||
- name: {{ .Release.Name }}-fluxcd-operator
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.addons.fluxcd.valuesOverride }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-fluxcd-values-override
|
||||
stringData:
|
||||
values: |
|
||||
{{- toYaml .Values.addons.fluxcd.valuesOverride | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -0,0 +1,46 @@
|
||||
{{- if .Values.addons.gpuOperator.enabled }}
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-gpu-operator
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: gpu-operator
|
||||
chart:
|
||||
spec:
|
||||
chart: cozy-gpu-operator
|
||||
reconcileStrategy: Revision
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
key: super-admin.svc
|
||||
targetNamespace: cozy-gpu-operator
|
||||
storageNamespace: cozy-gpu-operator
|
||||
install:
|
||||
createNamespace: true
|
||||
remediation:
|
||||
retries: -1
|
||||
upgrade:
|
||||
remediation:
|
||||
retries: -1
|
||||
{{- with .Values.addons.gpuOperator.valuesOverride }}
|
||||
values:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
- name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
- name: {{ .Release.Name }}-cilium
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -1,3 +1,15 @@
|
||||
{{- define "cozystack.defaultIngressValues" -}}
|
||||
ingress-nginx:
|
||||
fullnameOverride: ingress-nginx
|
||||
controller:
|
||||
kind: DaemonSet
|
||||
hostNetwork: true
|
||||
service:
|
||||
enabled: false
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/ingress-nginx: ""
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.addons.ingressNginx.enabled }}
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
@@ -5,7 +17,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-ingress-nginx
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: ingress-nginx
|
||||
@@ -17,6 +29,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
@@ -31,21 +44,7 @@ spec:
|
||||
remediation:
|
||||
retries: -1
|
||||
values:
|
||||
ingress-nginx:
|
||||
fullnameOverride: ingress-nginx
|
||||
controller:
|
||||
kind: DaemonSet
|
||||
hostNetwork: true
|
||||
service:
|
||||
enabled: false
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/ingress-nginx: ""
|
||||
{{- if .Values.addons.ingressNginx.valuesOverride }}
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-ingress-nginx-values-override
|
||||
valuesKey: values
|
||||
{{- end }}
|
||||
{{- toYaml (deepCopy .Values.addons.ingressNginx.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultIngressValues" .))) | nindent 4 }}
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
- name: {{ .Release.Name }}
|
||||
@@ -54,14 +53,3 @@ spec:
|
||||
- name: {{ .Release.Name }}-cilium
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.addons.ingressNginx.valuesOverride }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-ingress-nginx-values-override
|
||||
stringData:
|
||||
values: |
|
||||
{{- toYaml .Values.addons.ingressNginx.valuesOverride | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -7,7 +7,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-monitoring-agents
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cozy-monitoring-agents
|
||||
@@ -19,6 +19,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-vertical-pod-autoscaler-crds
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: vertical-pod-autoscaler-crds
|
||||
@@ -17,6 +17,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
|
||||
@@ -1,5 +1,28 @@
|
||||
{{- define "cozystack.defaultVPAValues" -}}
|
||||
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
|
||||
{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
|
||||
vertical-pod-autoscaler:
|
||||
recommender:
|
||||
extraArgs:
|
||||
container-name-label: container
|
||||
container-namespace-label: namespace
|
||||
container-pod-name-label: pod
|
||||
storage: prometheus
|
||||
memory-saver: true
|
||||
pod-label-prefix: label_
|
||||
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
|
||||
pod-name-label: pod
|
||||
pod-namespace-label: namespace
|
||||
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
|
||||
prometheus-cadvisor-job-name: cadvisor
|
||||
resources:
|
||||
limits:
|
||||
memory: 1600Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 1600Mi
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.addons.monitoringAgents.enabled }}
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
@@ -7,7 +30,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-vertical-pod-autoscaler
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: vertical-pod-autoscaler
|
||||
@@ -19,6 +42,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
@@ -33,32 +57,7 @@ spec:
|
||||
remediation:
|
||||
retries: -1
|
||||
values:
|
||||
vertical-pod-autoscaler:
|
||||
recommender:
|
||||
extraArgs:
|
||||
container-name-label: container
|
||||
container-namespace-label: namespace
|
||||
container-pod-name-label: pod
|
||||
storage: prometheus
|
||||
memory-saver: true
|
||||
pod-label-prefix: label_
|
||||
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
|
||||
pod-name-label: pod
|
||||
pod-namespace-label: namespace
|
||||
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
|
||||
prometheus-cadvisor-job-name: cadvisor
|
||||
resources:
|
||||
limits:
|
||||
memory: 1600Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 1600Mi
|
||||
{{- if .Values.addons.verticalPodAutoscaler.valuesOverride }}
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-vertical-pod-autoscaler-values-override
|
||||
valuesKey: values
|
||||
{{- end }}
|
||||
{{- toYaml (deepCopy .Values.addons.verticalPodAutoscaler.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultVPAValues" .))) | nindent 4 }}
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
- name: {{ .Release.Name }}
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-cozy-victoria-metrics-operator
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cozy-victoria-metrics-operator
|
||||
@@ -17,6 +17,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
|
||||
@@ -1,97 +1,237 @@
|
||||
{
|
||||
"title": "Chart Values",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"host": {
|
||||
"type": "string",
|
||||
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
|
||||
"default": ""
|
||||
"title": "Chart Values",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"host": {
|
||||
"type": "string",
|
||||
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
|
||||
"default": ""
|
||||
},
|
||||
"controlPlane": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"replicas": {
|
||||
"type": "number",
|
||||
"description": "Number of replicas for Kubernetes control-plane components",
|
||||
"default": 2
|
||||
},
|
||||
"controlPlane": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"replicas": {
|
||||
"type": "number",
|
||||
"description": "Number of replicas for Kubernetes contorl-plane components",
|
||||
"default": 2
|
||||
}
|
||||
"apiServer": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"default": "small",
|
||||
"enum": [
|
||||
"none",
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"storageClass": {
|
||||
"type": "string",
|
||||
"description": "StorageClass used to store user data",
|
||||
"default": "replicated"
|
||||
},
|
||||
"addons": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"certManager": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables the cert-manager",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ingressNginx": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
},
|
||||
"hosts": {
|
||||
"type": "array",
|
||||
"description": "List of domain names that should be passed through to the cluster by upper cluster",
|
||||
"default": [],
|
||||
"items": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"fluxcd": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables Flux CD",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"monitoringAgents": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
"controllerManager": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"default": {}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"default": "micro",
|
||||
"enum": [
|
||||
"none",
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"scheduler": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"default": "micro",
|
||||
"enum": [
|
||||
"none",
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"konnectivity": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"server": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"default": "micro",
|
||||
"enum": [
|
||||
"none",
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"storageClass": {
|
||||
"type": "string",
|
||||
"description": "StorageClass used to store user data",
|
||||
"default": "replicated"
|
||||
},
|
||||
"addons": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"certManager": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables the cert-manager",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cilium": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ingressNginx": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
},
|
||||
"hosts": {
|
||||
"type": "array",
|
||||
"description": "List of domain names that should be passed through to the cluster by upper cluster",
|
||||
"default": [],
|
||||
"items": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"gpuOperator": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables the gpu-operator",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"fluxcd": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables Flux CD",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"monitoringAgents": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"verticalPodAutoscaler": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"valuesOverride": {
|
||||
"type": "object",
|
||||
"description": "Custom values to override",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
## @section Common parameters
|
||||
|
||||
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
|
||||
## @param controlPlane.replicas Number of replicas for Kubernetes contorl-plane components
|
||||
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
|
||||
## @param storageClass StorageClass used to store user data
|
||||
##
|
||||
host: ""
|
||||
controlPlane:
|
||||
replicas: 2
|
||||
storageClass: replicated
|
||||
|
||||
## @param nodeGroups [object] nodeGroups configuration
|
||||
@@ -24,6 +22,14 @@ nodeGroups:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
|
||||
## List of GPUs to attach (WARN: NVIDIA driver requires at least 4 GiB of RAM)
|
||||
## e.g:
|
||||
## instanceType: "u1.xlarge"
|
||||
## gpus:
|
||||
## - name: nvidia.com/AD102GL_L40S
|
||||
gpus: []
|
||||
|
||||
|
||||
## @section Cluster Addons
|
||||
##
|
||||
addons:
|
||||
@@ -36,6 +42,12 @@ addons:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
|
||||
## Cilium CNI plugin
|
||||
##
|
||||
cilium:
|
||||
## @param addons.cilium.valuesOverride Custom values to override
|
||||
valuesOverride: {}
|
||||
|
||||
## Ingress-NGINX Controller
|
||||
##
|
||||
ingressNginx:
|
||||
@@ -52,6 +64,14 @@ addons:
|
||||
hosts: []
|
||||
valuesOverride: {}
|
||||
|
||||
## GPU-operator: NVIDIA GPU Operator
|
||||
##
|
||||
gpuOperator:
|
||||
## @param addons.gpuOperator.enabled Enables the gpu-operator
|
||||
## @param addons.gpuOperator.valuesOverride Custom values to override
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
|
||||
## Flux CD
|
||||
##
|
||||
fluxcd:
|
||||
@@ -77,62 +97,42 @@ addons:
|
||||
##
|
||||
valuesOverride: {}
|
||||
|
||||
## @section Kamaji control plane
|
||||
## @section Kubernetes control plane configuration
|
||||
##
|
||||
kamajiControlPlane:
|
||||
|
||||
controlPlane:
|
||||
replicas: 2
|
||||
|
||||
apiServer:
|
||||
## @param kamajiControlPlane.apiServer.resources Resources
|
||||
resources: {}
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 4000m
|
||||
# memory: 4Gi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 512Mi
|
||||
|
||||
## @param kamajiControlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.apiServer.resources Resources
|
||||
## e.g:
|
||||
## resources:
|
||||
## limits:
|
||||
## cpu: 4000m
|
||||
## memory: 4Gi
|
||||
## requests:
|
||||
## cpu: 100m
|
||||
## memory: 512Mi
|
||||
##
|
||||
resourcesPreset: "small"
|
||||
resources: {}
|
||||
|
||||
controllerManager:
|
||||
## @param kamajiControlPlane.controllerManager.resources Resources
|
||||
resources: {}
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 4000m
|
||||
# memory: 4Gi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 512Mi
|
||||
|
||||
## @param kamajiControlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.controllerManager.resources Resources
|
||||
## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
resourcesPreset: "micro"
|
||||
resources: {}
|
||||
|
||||
scheduler:
|
||||
## @param kamajiControlPlane.scheduler.resources Resources
|
||||
resources: {}
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 4000m
|
||||
# memory: 4Gi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 512Mi
|
||||
|
||||
## @param kamajiControlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.scheduler.resources Resources
|
||||
resourcesPreset: "micro"
|
||||
addons:
|
||||
konnectivity:
|
||||
server:
|
||||
## @param kamajiControlPlane.addons.konnectivity.server.resources Resources
|
||||
resources: {}
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 4000m
|
||||
# memory: 4Gi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 512Mi
|
||||
|
||||
## @param kamajiControlPlane.addons.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
resourcesPreset: "micro"
|
||||
|
||||
resources: {}
|
||||
|
||||
konnectivity:
|
||||
server:
|
||||
## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.konnectivity.server.resources Resources
|
||||
resourcesPreset: "micro"
|
||||
resources: {}
|
||||
|
||||
@@ -33,7 +33,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '*'
|
||||
version: '>= 0.0.0-0'
|
||||
interval: 1m0s
|
||||
timeout: 5m0s
|
||||
values:
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.10.0
|
||||
version: 0.10.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
|
||||
ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
|
||||
|
||||
@@ -13,9 +13,6 @@ spec:
|
||||
jobTemplate:
|
||||
spec:
|
||||
backoffLimit: 2
|
||||
template:
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
@@ -24,7 +21,7 @@ spec:
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: {{ .Release.Name }}-regsecret
|
||||
restartPolicy: Never
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: pgdump
|
||||
image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
|
||||
|
||||
@@ -4,4 +4,4 @@ description: Separated tenant namespace
|
||||
icon: /logos/tenant.svg
|
||||
|
||||
type: application
|
||||
version: 1.9.1
|
||||
version: 1.9.2
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cozy-tenant-configuration-hash
|
||||
namespace: {{ include "tenant.name" . }}
|
||||
data:
|
||||
cozyTenantConfigurationHash: {{ sha256sum (toJson .Values) | quote }}
|
||||
@@ -24,6 +24,7 @@ spec:
|
||||
ingress:
|
||||
- fromEntities:
|
||||
- world
|
||||
- cluster
|
||||
egress:
|
||||
- toEntities:
|
||||
- world
|
||||
|
||||
@@ -59,7 +59,8 @@ kubernetes 0.16.0 077045b0
|
||||
kubernetes 0.17.0 1fbbfcd0
|
||||
kubernetes 0.17.1 fd240701
|
||||
kubernetes 0.18.0 721c12a7
|
||||
kubernetes 0.18.1 HEAD
|
||||
kubernetes 0.19.0 93bdf411
|
||||
kubernetes 0.20.0 HEAD
|
||||
mysql 0.1.0 263e47be
|
||||
mysql 0.2.0 c24a103f
|
||||
mysql 0.3.0 53f2365e
|
||||
@@ -89,7 +90,8 @@ postgres 0.7.0 4b90bf5a
|
||||
postgres 0.7.1 1ec10165
|
||||
postgres 0.8.0 4e68e65c
|
||||
postgres 0.9.0 8267072d
|
||||
postgres 0.10.0 HEAD
|
||||
postgres 0.10.0 721c12a7
|
||||
postgres 0.10.1 HEAD
|
||||
rabbitmq 0.1.0 263e47be
|
||||
rabbitmq 0.2.0 53f2365e
|
||||
rabbitmq 0.3.0 6c5cf5bf
|
||||
@@ -130,7 +132,8 @@ tenant 1.6.8 bc95159a
|
||||
tenant 1.7.0 24fa7222
|
||||
tenant 1.8.0 160e4e2a
|
||||
tenant 1.9.0 728743db
|
||||
tenant 1.9.1 HEAD
|
||||
tenant 1.9.1 721c12a7
|
||||
tenant 1.9.2 HEAD
|
||||
virtual-machine 0.1.4 f2015d65
|
||||
virtual-machine 0.1.5 263e47be
|
||||
virtual-machine 0.2.0 c0685f43
|
||||
@@ -143,7 +146,8 @@ virtual-machine 0.7.1 0ab39f20
|
||||
virtual-machine 0.8.0 3fa4dd3a
|
||||
virtual-machine 0.8.1 93c46161
|
||||
virtual-machine 0.8.2 de19450f
|
||||
virtual-machine 0.9.0 HEAD
|
||||
virtual-machine 0.9.0 721c12a7
|
||||
virtual-machine 0.9.1 HEAD
|
||||
vm-disk 0.1.0 d971f2ff
|
||||
vm-disk 0.1.1 HEAD
|
||||
vm-instance 0.1.0 1ec10165
|
||||
@@ -153,7 +157,8 @@ vm-instance 0.4.0 e23286a3
|
||||
vm-instance 0.4.1 0ab39f20
|
||||
vm-instance 0.5.0 3fa4dd3a
|
||||
vm-instance 0.5.1 de19450f
|
||||
vm-instance 0.6.0 HEAD
|
||||
vm-instance 0.6.0 721c12a7
|
||||
vm-instance 0.6.1 HEAD
|
||||
vpn 0.1.0 263e47be
|
||||
vpn 0.2.0 53f2365e
|
||||
vpn 0.3.0 6c5cf5bf
|
||||
|
||||
@@ -17,7 +17,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.9.0
|
||||
version: 0.9.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -74,7 +74,8 @@ spec:
|
||||
{{- if .Values.gpus }}
|
||||
gpus:
|
||||
{{- range $i, $gpu := .Values.gpus }}
|
||||
- deviceName: {{ $gpu.name }}
|
||||
- name: gpu{{ add $i 1 }}
|
||||
deviceName: {{ $gpu.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
disks:
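
The change above (applied to both the virtual-machine and vm-instance charts) gives each attached GPU a stable `gpuN` name derived from its list index in addition to the device name. A sketch of the rendering, reusing the device name from the chart's own commented example:

```yaml
# values (device name taken from the chart's example; real hardware will differ):
gpus:
  - name: nvidia.com/AD102GL_L40S
  - name: nvidia.com/AD102GL_L40S

# rendered fragment of the VirtualMachine spec:
devices:
  gpus:
    - name: gpu1
      deviceName: nvidia.com/AD102GL_L40S
    - name: gpu2
      deviceName: nvidia.com/AD102GL_L40S
```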
|
||||
|
||||
@@ -17,7 +17,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.6.0
|
||||
version: 0.6.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -46,7 +46,8 @@ spec:
|
||||
{{- if .Values.gpus }}
|
||||
gpus:
|
||||
{{- range $i, $gpu := .Values.gpus }}
|
||||
- deviceName: {{ $gpu.name }}
|
||||
- name: gpu{{ add $i 1 }}
|
||||
deviceName: {{ $gpu.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
disks:
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
cozystack:
|
||||
image: ghcr.io/cozystack/cozystack/installer:v0.30.2@sha256:59996588b5d59b5593fb34442b2f2ed8ef466d138b229a8d37beb6f70141a690
|
||||
image: ghcr.io/cozystack/cozystack/installer:v0.31.0-rc.1@sha256:ab0e8fd97632ba784a42a3d0714806ea327440f82ffa5c4896a87c5fb7c1ec6e
|
||||
|
||||
@@ -161,7 +161,7 @@ releases:
|
||||
releaseName: piraeus-operator
|
||||
chart: cozy-piraeus-operator
|
||||
namespace: cozy-linstor
|
||||
dependsOn: [cilium,cert-manager,victoria-metrics-operator]
|
||||
dependsOn: [cilium,cert-manager]
|
||||
|
||||
- name: snapshot-controller
|
||||
releaseName: snapshot-controller
|
||||
|
||||
@@ -134,6 +134,11 @@ releases:
|
||||
namespace: cozy-kubevirt
|
||||
privileged: true
|
||||
dependsOn: [cilium,kubeovn,kubevirt-operator]
|
||||
{{- $cpuAllocationRatio := index $cozyConfig.data "cpu-allocation-ratio" }}
|
||||
{{- if $cpuAllocationRatio }}
|
||||
values:
|
||||
cpuAllocationRatio: {{ $cpuAllocationRatio }}
|
||||
{{- end }}
|
||||
|
||||
- name: kubevirt-instancetypes
|
||||
releaseName: kubevirt-instancetypes
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
{{- $host = index $cozyConfig.data "root-host" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $tenantRoot := list }}
|
||||
{{- $tenantRoot := dict }}
|
||||
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
|
||||
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" "tenant-root" "tenant-root" }}
|
||||
{{- end }}
|
||||
@@ -37,7 +37,7 @@ metadata:
|
||||
labels:
|
||||
cozystack.io/ui: "true"
|
||||
spec:
|
||||
interval: 1m
|
||||
interval: 0s
|
||||
releaseName: tenant-root
|
||||
install:
|
||||
remediation:
|
||||
@@ -54,12 +54,6 @@ spec:
|
||||
namespace: cozy-public
|
||||
values:
|
||||
host: "{{ $host }}"
|
||||
valuesFrom:
|
||||
- kind: ConfigMap
|
||||
name: "cozy-system-configuration-hash"
|
||||
valuesKey: "cozyTenantConfigurationHash"
|
||||
targetPath: "cozyTenantConfigurationHash"
|
||||
optional: true
|
||||
dependsOn:
|
||||
{{- range $x := $bundle.releases }}
|
||||
{{- if has $x.name (list "cilium" "kubeovn") }}
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
{{- $rootTenantConfiguration := dict "values" .Values }}
|
||||
{{- $cozyConfig := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack" ) "data" }}
|
||||
{{- $cozyScheduling := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling") "data" }}
|
||||
{{- $cozyBranding := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" ) "data" }}
|
||||
{{- $_ := set $rootTenantConfiguration "config" $cozyConfig }}
|
||||
{{- $_ := set $rootTenantConfiguration "scheduling" $cozyScheduling }}
|
||||
{{- $_ := set $rootTenantConfiguration "branding" $cozyBranding }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cozy-system-configuration-hash
|
||||
namespace: tenant-root
|
||||
data:
|
||||
cozyTenantConfigurationHash: {{ sha256sum (toJson $rootTenantConfiguration) | quote }}
|
||||
@@ -55,6 +55,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
{{- with $x.valuesFiles }}
|
||||
valuesFiles:
|
||||
{{- toYaml $x.valuesFiles | nindent 6 }}
|
||||
|
||||
@@ -11,14 +11,6 @@ include ../../../scripts/common-envs.mk
|
||||
|
||||
help: ## Show this help.
|
||||
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
||||
show:
|
||||
helm template -n $(NAMESPACE) $(NAME) .
|
||||
|
||||
apply: ## Create sandbox in existing Kubernetes cluster.
|
||||
helm template -n $(NAMESPACE) $(NAME) . | kubectl apply -f -
|
||||
|
||||
diff:
|
||||
helm template -n $(NAMESPACE) $(NAME) . | kubectl diff -f -
|
||||
|
||||
image: image-e2e-sandbox
|
||||
|
||||
@@ -39,26 +31,11 @@ image-e2e-sandbox:
|
||||
test: ## Run the end-to-end tests in existing sandbox.
|
||||
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.sh'
|
||||
|
||||
test-applications: ## Run the end-to-end tests in existing sandbox for applications.
|
||||
for app in $(TESTING_APPS); do \
|
||||
docker exec ${SANDBOX_NAME} bash -c "/hack/e2e.application.sh $${app}"; \
|
||||
done
|
||||
docker exec ${SANDBOX_NAME} bash -c "kubectl get hr -A | grep -v 'True'"
|
||||
|
||||
delete: ## Remove sandbox from existing Kubernetes cluster.
|
||||
docker rm -f "${SANDBOX_NAME}" || true
|
||||
|
||||
exec: ## Opens an interactive shell in the sandbox container.
|
||||
docker exec -ti "${SANDBOX_NAME}" -- bash
|
||||
|
||||
proxy: sync-hosts ## Enable a SOCKS5 proxy server; mirrord and gost must be installed.
|
||||
mirrord exec --target deploy/cozystack-e2e-sandbox --target-namespace cozy-e2e-tests -- gost -L=127.0.0.1:10080
|
||||
|
||||
login: ## Downloads the kubeconfig into a temporary directory and runs a shell with the sandbox environment; mirrord must be installed.
|
||||
mirrord exec --target deploy/cozystack-e2e-sandbox --target-namespace cozy-e2e-tests -- "$$SHELL"
|
||||
|
||||
sync-hosts:
|
||||
kubectl exec -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'kubectl get ing -A -o go-template='\''{{ "127.0.0.1 localhost\n"}}{{ range .items }}{{ range .status.loadBalancer.ingress }}{{ .ip }}{{ end }} {{ range .spec.rules }}{{ .host }}{{ end }}{{ "\n" }}{{ end }}'\'' > /etc/hosts'
|
||||
docker exec -ti "${SANDBOX_NAME}" bash
|
||||
|
||||
apply: delete
|
||||
docker run -d --rm --name "${SANDBOX_NAME}" --privileged "$$(yq .e2e.image values.yaml)" sleep infinity
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
e2e:
|
||||
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.2@sha256:31273d6b42dc88c2be2ff9ba64564d1b12e70ae8a5480953341b0d113ac7d4bd
|
||||
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.31.0-rc.1@sha256:a20a6834527ccfc8daf7413a15234f3f7dbbd7774810c8e1966736d487ef7d0c
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/matchbox:v0.30.2@sha256:307d382f75f1dcb39820c73b93b2ce576cdb6d58032679bda7d926999c677900
|
||||
ghcr.io/cozystack/cozystack/matchbox:v0.31.0-rc.1@sha256:de69166fd6efec988cad7ad5be41bbb57c8134508c531d7496fc7f15772e4993
|
||||
|
||||
@@ -3,4 +3,4 @@ name: info
|
||||
description: Info
|
||||
icon: /logos/info.svg
|
||||
type: application
|
||||
version: 1.0.0
|
||||
version: 1.0.1
|
||||
|
||||
@@ -11,6 +11,13 @@
|
||||
{{- $k8sClient := index $k8sClientSecret.data "client-secret-key" | b64dec }}
|
||||
{{- $rootSaConfigMap := lookup "v1" "ConfigMap" "kube-system" "kube-root-ca.crt" }}
|
||||
{{- $k8sCa := index $rootSaConfigMap.data "ca.crt" | b64enc }}
|
||||
|
||||
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
|
||||
{{- $tenantRoot := lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" "tenant-root" "tenant-root" }}
|
||||
{{- if and $tenantRoot $tenantRoot.spec $tenantRoot.spec.values $tenantRoot.spec.values.host }}
|
||||
{{- $host = $tenantRoot.spec.values.host }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
|
||||
@@ -3,4 +3,4 @@ name: ingress
|
||||
description: NGINX Ingress Controller
|
||||
icon: /logos/ingress-nginx.svg
|
||||
type: application
|
||||
version: 1.4.0
|
||||
version: 1.6.0
|
||||
|
||||
@@ -4,12 +4,14 @@
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ---------------- | ----------------------------------------------------------------- | ------- |
|
||||
| `replicas` | Number of ingress-nginx replicas | `2` |
|
||||
| `externalIPs` | List of externalIPs for service. | `[]` |
|
||||
| `whitelist` | List of client networks | `[]` |
|
||||
| `clouflareProxy` | Restoring original visitor IPs when Cloudflare proxied is enabled | `false` |
|
||||
| `dashboard` | Should ingress serve Cozystack service dashboard | `false` |
|
||||
| `cdiUploadProxy` | Should ingress serve CDI upload proxy | `false` |
|
||||
| Name | Description | Value |
|
||||
| ----------------- | ----------------------------------------------------------------- | ------- |
|
||||
| `replicas` | Number of ingress-nginx replicas | `2` |
|
||||
| `externalIPs` | List of externalIPs for service. | `[]` |
|
||||
| `whitelist` | List of client networks | `[]` |
|
||||
| `clouflareProxy` | Restoring original visitor IPs when Cloudflare proxied is enabled | `false` |
|
||||
| `dashboard` | Should ingress serve Cozystack service dashboard | `false` |
|
||||
| `cdiUploadProxy` | Should ingress serve CDI upload proxy | `false` |
|
||||
| `virtExportProxy` | Should ingress serve KubeVirt export proxy | `false` |
|
||||
| `api` | Should ingress serve Cozystack API | `true` |
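
For example, a tenant ingress that also exposes the new KubeVirt export proxy could be configured with values along these lines; the address and network below are placeholders.

```yaml
replicas: 2
externalIPs:
  - 192.0.2.10            # hypothetical external IP
whitelist:
  - 203.0.113.0/24        # hypothetical client network
dashboard: true
cdiUploadProxy: true
virtExportProxy: true
api: true
```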
|
||||
|
||||
|
||||
packages/extra/ingress/templates/api.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
||||
{{- $issuerType := (index $cozyConfig.data "clusterissuer") | default "http01" }}
|
||||
|
||||
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
|
||||
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}
|
||||
|
||||
{{- if .Values.api }}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
name: api-{{ .Release.Namespace }}
|
||||
namespace: default
|
||||
spec:
|
||||
ingressClassName: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- host: api.{{ $host }}
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
service:
|
||||
name: kubernetes
|
||||
port:
|
||||
number: 443
|
||||
path: /
|
||||
pathType: Prefix
|
||||
{{- end }}
|
||||
@@ -10,11 +10,7 @@ kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
|
||||
cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||
{{- if eq $issuerType "cloudflare" }}
|
||||
{{- else }}
|
||||
acme.cert-manager.io/http01-ingress-class: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
name: cdi-uploadproxy-{{ .Release.Namespace }}
|
||||
namespace: cozy-kubevirt-cdi
|
||||
spec:
|
||||
@@ -30,8 +26,4 @@ spec:
|
||||
number: 443
|
||||
path: /
|
||||
pathType: Prefix
|
||||
tls:
|
||||
- hosts:
|
||||
- cdi-uploadproxy.{{ $host }}
|
||||
secretName: cdi-uploadproxy-{{ .Release.Namespace }}-tls
|
||||
{{- end }}
|
||||
|
||||
@@ -4,6 +4,15 @@
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}

{{- $tenantRoot := dict }}
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" "tenant-root" "tenant-root" }}
{{- end }}
{{- if and $tenantRoot $tenantRoot.spec $tenantRoot.spec.values $tenantRoot.spec.values.host }}
{{- $host = $tenantRoot.spec.values.host }}
{{- else }}
{{- end }}

{{- if .Values.dashboard }}
apiVersion: networking.k8s.io/v1
kind: Ingress
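The new lookup prefers the host configured on the root tenant's HelmRelease over the namespace annotation. A minimal sketch of the object the template inspects, assuming only the fields actually read above (all values are hypothetical):

apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: tenant-root
  namespace: tenant-root
spec:
  values:
    host: example.org   # when present, overrides the namespace.cozystack.io/host annotation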
@@ -11,7 +11,7 @@ spec:
        kind: HelmRepository
        name: cozystack-system
        namespace: cozy-system
      version: '*'
      version: '>= 0.0.0-0'
  interval: 1m0s
  timeout: 5m0s
  values:
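This constraint change (which appears twice in this diff) swaps the catch-all `'*'` range for `'>= 0.0.0-0'`; adding a pre-release suffix to the lower bound makes the SemVer range also match pre-release chart versions. A hedged illustration, with a made-up chart name and version:

spec:
  chart:
    spec:
      chart: example-chart
      # version: '*'           would not select a chart published as 1.6.0-rc.1
      version: '>= 0.0.0-0'  # also matches pre-release versions such as 1.6.0-rc.1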
packages/extra/ingress/templates/vm-exportproxy.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $issuerType := (index $cozyConfig.data "clusterissuer") | default "http01" }}

{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}

{{- if .Values.virtExportProxy }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
  name: virt-exportproxy-{{ .Release.Namespace }}
  namespace: cozy-kubevirt
spec:
  ingressClassName: {{ .Release.Namespace }}
  rules:
  - host: virt-exportproxy.{{ $host }}
    http:
      paths:
      - backend:
          service:
            name: virt-exportproxy
            port:
              number: 443
        path: /
        pathType: ImplementationSpecific
{{- end }}
@@ -35,6 +35,16 @@
      "type": "boolean",
      "description": "Should ingress serve CDI upload proxy",
      "default": false
    },
    "virtExportProxy": {
      "type": "boolean",
      "description": "Should ingress serve KubeVirt export proxy",
      "default": false
    },
    "api": {
      "type": "boolean",
      "description": "Should ingress serve Cozystack API",
      "default": true
    }
  }
}
@@ -30,3 +30,9 @@ dashboard: false

## @param cdiUploadProxy Should ingress serve CDI upload proxy
cdiUploadProxy: false

## @param virtExportProxy Should ingress serve KubeVirt export proxy
virtExportProxy: false

## @param api Should ingress serve Cozystack API
api: true
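Taken together with the schema above, the new endpoints are toggled through the ingress application's values. A minimal sketch, assuming the usual Flux HelmRelease wrapping of this chart (release name and namespace are illustrative):

apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: ingress
  namespace: tenant-foo
spec:
  values:
    replicas: 2
    api: true              # expose api.<host> (enabled by default)
    virtExportProxy: true  # expose virt-exportproxy.<host>
    cdiUploadProxy: false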
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:c63978e1ed0304e8518b31ddee56c4e8115541b997d8efbe1c0a74da57140399
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:66c4547efd18b4d7475ff73b2c4e2f39e9b4471d55e85237e2fe3e87af05c302
@@ -14,7 +14,7 @@ spec:
        kind: HelmRepository
        name: cozystack-system
        namespace: cozy-system
      version: '*'
      version: '>= 0.0.0-0'
  interval: 1m0s
  timeout: 5m0s
  values:
@@ -11,12 +11,15 @@ etcd 2.5.0 24fa7222
etcd 2.6.0 8c460528
etcd 2.6.1 45a7416c
etcd 2.7.0 HEAD
info 1.0.0 HEAD
info 1.0.0 93bdf411
info 1.0.1 HEAD
ingress 1.0.0 d7cfa53c
ingress 1.1.0 5bbc488e
ingress 1.2.0 28fca4ef
ingress 1.3.0 fde4bcfa
ingress 1.4.0 HEAD
ingress 1.4.0 fd240701
ingress 1.5.0 93bdf411
ingress 1.6.0 HEAD
monitoring 1.0.0 d7cfa53c
monitoring 1.1.0 25221fdc
monitoring 1.2.0 f81be075
@@ -5,7 +5,7 @@ include ../../scripts/common-envs.mk
repo:
	rm -rf "$(OUT)"
	mkdir -p "$(OUT)"
	helm package -d "$(OUT)" $$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")') --version $(VERSION)
	helm package -d "$(OUT)" $$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")') --version $(COZYSTACK_VERSION)
	cd "$(OUT)" && helm repo index .

fix-chartnames:
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:a47d2743d01bff0ce60aa745fdff54f9b7184dff8679b11ab4ecd08ac663012b
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:67e4a5da0ab43d93e8b75094d5a2db8159cb927a47b94f945f80d0ffb93d3301
@@ -1,6 +1,6 @@
apiVersion: v2
appVersion: 0.18.1
appVersion: 0.19.0
description: Cluster API Operator
name: cluster-api-operator
type: application
version: 0.18.1
version: 0.19.0
@@ -1,26 +1,8 @@
# Addon provider
{{- if .Values.addon }}
{{- $addons := split ";" .Values.addon }}
{{- $addonNamespace := "" }}
{{- $addonName := "" }}
{{- $addonVersion := "" }}
{{- range $addon := $addons }}
{{- $addonArgs := split ":" $addon }}
{{- $addonArgsLen := len $addonArgs }}
{{- if eq $addonArgsLen 3 }}
{{- $addonNamespace = $addonArgs._0 }}
{{- $addonName = $addonArgs._1 }}
{{- $addonVersion = $addonArgs._2 }}
{{- else if eq $addonArgsLen 2 }}
{{- $addonNamespace = print $addonArgs._0 "-addon-system" }}
{{- $addonName = $addonArgs._0 }}
{{- $addonVersion = $addonArgs._1 }}
{{- else if eq $addonArgsLen 1 }}
{{- $addonNamespace = print $addonArgs._0 "-addon-system" }}
{{- $addonName = $addonArgs._0 }}
{{- else }}
{{- fail "addon provider argument should have the following format helm:v1.0.0 or mynamespace:helm:v1.0.0" }}
{{- end }}
{{- range $name, $addon := $.Values.addon }}
{{- $addonNamespace := default ( printf "%s-%s" $name "addon-system" ) (get $addon "namespace") }}
{{- $addonName := $name }}
{{- $addonVersion := get $addon "version" }}
---
apiVersion: v1
kind: Namespace
@@ -56,5 +38,24 @@ spec:
  {{- if $.Values.secretNamespace }}
  secretNamespace: {{ $.Values.secretNamespace }}
  {{- end }}
  {{- if $addon.manifestPatches }}
  manifestPatches: {{ toYaml $addon.manifestPatches | nindent 4 }}
  {{- end }}
  {{- if $addon.additionalManifests }}
  additionalManifests:
    name: {{ $addon.additionalManifests.name }}
    {{- if $addon.additionalManifests.namespace }}
    namespace: {{ $addon.additionalManifests.namespace }}
    {{- end }} {{/* if $addon.additionalManifests.namespace */}}
  {{- end }}
{{- if $addon.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $addon.additionalManifests.name }}
  namespace: {{ default $addonNamespace $addon.additionalManifests.namespace }}
data:
  manifests: {{- toYaml $addon.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $addon := .Values.addon */}}
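The same rewrite is applied to the bootstrap, control-plane, core, infrastructure, and IPAM provider templates below: instead of parsing a `namespace:name:version` string, each provider is now a map keyed by provider name. A hedged before/after sketch of the corresponding values, reusing the format from the old fail message:

# old string format
addon: "helm:v1.0.0"

# new map format
addon:
  helm:              # provider name (map key), required
    namespace: ""    # optional; defaults to helm-addon-system
    version: v1.0.0  # optional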
@@ -1,26 +1,8 @@
|
||||
# Bootstrap provider
|
||||
{{- if .Values.bootstrap }}
|
||||
{{- $bootstraps := split ";" .Values.bootstrap }}
|
||||
{{- $bootstrapNamespace := "" }}
|
||||
{{- $bootstrapName := "" }}
|
||||
{{- $bootstrapVersion := "" }}
|
||||
{{- range $bootstrap := $bootstraps }}
|
||||
{{- $bootstrapArgs := split ":" $bootstrap }}
|
||||
{{- $bootstrapArgsLen := len $bootstrapArgs }}
|
||||
{{- if eq $bootstrapArgsLen 3 }}
|
||||
{{- $bootstrapNamespace = $bootstrapArgs._0 }}
|
||||
{{- $bootstrapName = $bootstrapArgs._1 }}
|
||||
{{- $bootstrapVersion = $bootstrapArgs._2 }}
|
||||
{{- else if eq $bootstrapArgsLen 2 }}
|
||||
{{- $bootstrapNamespace = print $bootstrapArgs._0 "-bootstrap-system" }}
|
||||
{{- $bootstrapName = $bootstrapArgs._0 }}
|
||||
{{- $bootstrapVersion = $bootstrapArgs._1 }}
|
||||
{{- else if eq $bootstrapArgsLen 1 }}
|
||||
{{- $bootstrapNamespace = print $bootstrapArgs._0 "-bootstrap-system" }}
|
||||
{{- $bootstrapName = $bootstrapArgs._0 }}
|
||||
{{- else }}
|
||||
{{- fail "bootstrap provider argument should have the following format kubeadm:v1.0.0 or mynamespace:kubeadm:v1.0.0" }}
|
||||
{{- end }}
|
||||
{{- range $name, $bootstrap := $.Values.bootstrap }}
|
||||
{{- $bootstrapNamespace := default ( printf "%s-%s" $name "bootstrap-system" ) (get $bootstrap "namespace") }}
|
||||
{{- $bootstrapName := $name }}
|
||||
{{- $bootstrapVersion := get $bootstrap "version" }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
@@ -57,5 +39,24 @@ spec:
|
||||
namespace: {{ $.Values.configSecret.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $bootstrap.manifestPatches }}
|
||||
manifestPatches: {{ toYaml $bootstrap.manifestPatches | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $bootstrap.additionalManifests }}
|
||||
additionalManifests:
|
||||
name: {{ $bootstrap.additionalManifests.name }}
|
||||
{{- if $bootstrap.additionalManifests.namespace }}
|
||||
namespace: {{ $bootstrap.additionalManifests.namespace }}
|
||||
{{- end }} {{/* if $bootstrap.additionalManifests.namespace */}}
|
||||
{{- end }}
|
||||
{{- if $bootstrap.additionalManifests }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ $bootstrap.additionalManifests.name }}
|
||||
namespace: {{ default $bootstrapNamespace $bootstrap.additionalManifests.namespace }}
|
||||
data:
|
||||
manifests: {{- toYaml $bootstrap.additionalManifests.manifests | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }} {{/* range $name, $bootstrap := .Values.bootstrap */}}
|
||||
|
||||
@@ -1,26 +1,8 @@
|
||||
# Control plane provider
|
||||
{{- if .Values.controlPlane }}
|
||||
{{- $controlPlanes := split ";" .Values.controlPlane }}
|
||||
{{- $controlPlaneNamespace := "" }}
|
||||
{{- $controlPlaneName := "" }}
|
||||
{{- $controlPlaneVersion := "" }}
|
||||
{{- range $controlPlane := $controlPlanes }}
|
||||
{{- $controlPlaneArgs := split ":" $controlPlane }}
|
||||
{{- $controlPlaneArgsLen := len $controlPlaneArgs }}
|
||||
{{- if eq $controlPlaneArgsLen 3 }}
|
||||
{{- $controlPlaneNamespace = $controlPlaneArgs._0 }}
|
||||
{{- $controlPlaneName = $controlPlaneArgs._1 }}
|
||||
{{- $controlPlaneVersion = $controlPlaneArgs._2 }}
|
||||
{{- else if eq $controlPlaneArgsLen 2 }}
|
||||
{{- $controlPlaneNamespace = print $controlPlaneArgs._0 "-control-plane-system" }}
|
||||
{{- $controlPlaneName = $controlPlaneArgs._0 }}
|
||||
{{- $controlPlaneVersion = $controlPlaneArgs._1 }}
|
||||
{{- else if eq $controlPlaneArgsLen 1 }}
|
||||
{{- $controlPlaneNamespace = print $controlPlaneArgs._0 "-control-plane-system" }}
|
||||
{{- $controlPlaneName = $controlPlaneArgs._0 }}
|
||||
{{- else }}
|
||||
{{- fail "controlplane provider argument should have the following format kubeadm:v1.0.0 or mynamespace:kubeadm:v1.0.0" }}
|
||||
{{- end }}
|
||||
{{- range $name, $controlPlane := $.Values.controlPlane }}
|
||||
{{- $controlPlaneNamespace := default ( printf "%s-%s" $name "control-plane-system" ) (get $controlPlane "namespace") }}
|
||||
{{- $controlPlaneName := $name }}
|
||||
{{- $controlPlaneVersion := get $controlPlane "version" }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
@@ -70,5 +52,24 @@ spec:
|
||||
namespace: {{ $.Values.configSecret.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $controlPlane.manifestPatches }}
|
||||
manifestPatches: {{ toYaml $controlPlane.manifestPatches | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $controlPlane.additionalManifests }}
|
||||
additionalManifests:
|
||||
name: {{ $controlPlane.additionalManifests.name }}
|
||||
{{- if $controlPlane.additionalManifests.namespace }}
|
||||
namespace: {{ $controlPlane.additionalManifests.namespace }}
|
||||
{{- end }} {{/* if $controlPlane.additionalManifests.namespace */}}
|
||||
{{- end }}
|
||||
{{- if $controlPlane.additionalManifests }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ $controlPlane.additionalManifests.name }}
|
||||
namespace: {{ default $controlPlaneNamespace $controlPlane.additionalManifests.namespace }}
|
||||
data:
|
||||
manifests: {{- toYaml $controlPlane.additionalManifests.manifests | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }} {{/* range $name, $controlPlane := .Values.controlPlane */}}
|
||||
|
||||
@@ -1,25 +1,8 @@
|
||||
# Core provider
|
||||
{{- if .Values.core }}
|
||||
{{- $coreArgs := split ":" .Values.core }}
|
||||
{{- $coreArgsLen := len $coreArgs }}
|
||||
{{- $coreVersion := "" }}
|
||||
{{- $coreNamespace := "" }}
|
||||
{{- $coreName := "" }}
|
||||
{{- $coreVersion := "" }}
|
||||
{{- if eq $coreArgsLen 3 }}
|
||||
{{- $coreNamespace = $coreArgs._0 }}
|
||||
{{- $coreName = $coreArgs._1 }}
|
||||
{{- $coreVersion = $coreArgs._2 }}
|
||||
{{- else if eq $coreArgsLen 2 }}
|
||||
{{- $coreNamespace = "capi-system" }}
|
||||
{{- $coreName = $coreArgs._0 }}
|
||||
{{- $coreVersion = $coreArgs._1 }}
|
||||
{{- else if eq $coreArgsLen 1 }}
|
||||
{{- $coreNamespace = "capi-system" }}
|
||||
{{- $coreName = $coreArgs._0 }}
|
||||
{{- else }}
|
||||
{{- fail "core provider argument should have the following format cluster-api:v1.0.0 or mynamespace:cluster-api:v1.0.0" }}
|
||||
{{- end }}
|
||||
{{- range $name, $core := $.Values.core }}
|
||||
{{- $coreNamespace := default "capi-system" (get $core "namespace") }}
|
||||
{{- $coreName := $name }}
|
||||
{{- $coreVersion := get $core "version" }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
@@ -65,4 +48,24 @@ spec:
|
||||
namespace: {{ $.Values.configSecret.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $core.manifestPatches }}
|
||||
manifestPatches: {{ toYaml $core.manifestPatches | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $core.additionalManifests }}
|
||||
additionalManifests:
|
||||
name: {{ $core.additionalManifests.name }}
|
||||
{{- if $core.additionalManifests.namespace }}
|
||||
namespace: {{ $core.additionalManifests.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $core.additionalManifests }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ $core.additionalManifests.name }}
|
||||
namespace: {{ default $coreNamespace $core.additionalManifests.namespace }}
|
||||
data:
|
||||
manifests: {{- toYaml $core.additionalManifests.manifests | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }} {{/* range $name, $core := .Values.core */}}
|
||||
|
||||
@@ -1,26 +1,8 @@
|
||||
# Infrastructure providers
|
||||
{{- if .Values.infrastructure }}
|
||||
{{- $infrastructures := split ";" .Values.infrastructure }}
|
||||
{{- $infrastructureNamespace := "" }}
|
||||
{{- $infrastructureName := "" }}
|
||||
{{- $infrastructureVersion := "" }}
|
||||
{{- range $infrastructure := $infrastructures }}
|
||||
{{- $infrastructureArgs := split ":" $infrastructure }}
|
||||
{{- $infrastructureArgsLen := len $infrastructureArgs }}
|
||||
{{- if eq $infrastructureArgsLen 3 }}
|
||||
{{- $infrastructureNamespace = $infrastructureArgs._0 }}
|
||||
{{- $infrastructureName = $infrastructureArgs._1 }}
|
||||
{{- $infrastructureVersion = $infrastructureArgs._2 }}
|
||||
{{- else if eq $infrastructureArgsLen 2 }}
|
||||
{{- $infrastructureNamespace = print $infrastructureArgs._0 "-infrastructure-system" }}
|
||||
{{- $infrastructureName = $infrastructureArgs._0 }}
|
||||
{{- $infrastructureVersion = $infrastructureArgs._1 }}
|
||||
{{- else if eq $infrastructureArgsLen 1 }}
|
||||
{{- $infrastructureNamespace = print $infrastructureArgs._0 "-infrastructure-system" }}
|
||||
{{- $infrastructureName = $infrastructureArgs._0 }}
|
||||
{{- else }}
|
||||
{{- fail "infrastructure provider argument should have the following format aws:v1.0.0 or mynamespace:aws:v1.0.0" }}
|
||||
{{- end }}
|
||||
{{- range $name, $infra := $.Values.infrastructure }}
|
||||
{{- $infrastructureNamespace := default ( printf "%s-%s" $name "infrastructure-system" ) (get $infra "namespace") }}
|
||||
{{- $infrastructureName := $name }}
|
||||
{{- $infrastructureVersion := get $infra "version" }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
@@ -83,5 +65,24 @@ spec:
|
||||
{{- if $.Values.additionalDeployments }}
|
||||
additionalDeployments: {{ toYaml $.Values.additionalDeployments | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $infra.manifestPatches }}
|
||||
manifestPatches: {{- toYaml $infra.manifestPatches | nindent 4 }}
|
||||
{{- end }} {{/* if $infra.manifestPatches */}}
|
||||
{{- if $infra.additionalManifests }}
|
||||
additionalManifests:
|
||||
name: {{ $infra.additionalManifests.name }}
|
||||
{{- if $infra.additionalManifests.namespace }}
|
||||
namespace: {{ $infra.additionalManifests.namespace }}
|
||||
{{- end }} {{/* if $infra.additionalManifests.namespace */}}
|
||||
{{- end }} {{/* if $infra.additionalManifests */}}
|
||||
{{- if $infra.additionalManifests }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ $infra.additionalManifests.name }}
|
||||
namespace: {{ default $infrastructureNamespace $infra.additionalManifests.namespace }}
|
||||
data:
|
||||
manifests: {{- toYaml $infra.additionalManifests.manifests | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }} {{/* range $name, $infra := .Values.infrastructure */}}
|
||||
|
||||
@@ -1,26 +1,8 @@
|
||||
# IPAM providers
|
||||
{{- if .Values.ipam }}
|
||||
{{- $ipams := split ";" .Values.ipam }}
|
||||
{{- $ipamNamespace := "" }}
|
||||
{{- $ipamName := "" }}
|
||||
{{- $ipamVersion := "" }}
|
||||
{{- range $ipam := $ipams }}
|
||||
{{- $ipamArgs := split ":" $ipam }}
|
||||
{{- $ipamArgsLen := len $ipamArgs }}
|
||||
{{- if eq $ipamArgsLen 3 }}
|
||||
{{- $ipamNamespace = $ipamArgs._0 }}
|
||||
{{- $ipamName = $ipamArgs._1 }}
|
||||
{{- $ipamVersion = $ipamArgs._2 }}
|
||||
{{- else if eq $ipamArgsLen 2 }}
|
||||
{{- $ipamNamespace = print $ipamArgs._0 "-ipam-system" }}
|
||||
{{- $ipamName = $ipamArgs._0 }}
|
||||
{{- $ipamVersion = $ipamArgs._1 }}
|
||||
{{- else if eq $ipamArgsLen 1 }}
|
||||
{{- $ipamNamespace = print $ipamArgs._0 "-ipam-system" }}
|
||||
{{- $ipamName = $ipamArgs._0 }}
|
||||
{{- else }}
|
||||
{{- fail "ipam provider argument should have the following format in-cluster:v1.0.0 or mynamespace:in-cluster:v1.0.0" }}
|
||||
{{- end }}
|
||||
{{- range $name, $ipam := $.Values.ipam }}
|
||||
{{- $ipamNamespace := default ( printf "%s-%s" $name "ipam-system" ) (get $ipam "namespace") }}
|
||||
{{- $ipamName := $name }}
|
||||
{{- $ipamVersion := get $ipam "version" }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
@@ -70,8 +52,27 @@ spec:
|
||||
namespace: {{ $.Values.configSecret.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $ipam.manifestPatches }}
|
||||
manifestPatches: {{ toYaml $ipam.manifestPatches | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $.Values.additionalDeployments }}
|
||||
additionalDeployments: {{ toYaml $.Values.additionalDeployments | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if $ipam.additionalManifests }}
|
||||
additionalManifests:
|
||||
name: {{ $ipam.additionalManifests.name }}
|
||||
{{- if $ipam.additionalManifests.namespace }}
|
||||
namespace: {{ $ipam.additionalManifests.namespace }}
|
||||
{{- end }} {{/* if $ipam.additionalManifests.namespace */}}
|
||||
{{- end }}
|
||||
{{- if $ipam.additionalManifests }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ $ipam.additionalManifests.name }}
|
||||
namespace: {{ default $ipamNamespace $ipam.additionalManifests.namespace }}
|
||||
data:
|
||||
manifests: {{- toYaml $ipam.additionalManifests.manifests | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }} {{/* range $name, $ipam := .Values.ipam */}}
|
||||
|
||||
@@ -1305,6 +1305,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled
|
||||
on the controller manager for the additional provider deployment.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -2836,6 +2843,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled on
|
||||
the controller manager for the provider.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -3048,27 +3062,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -3078,6 +3097,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -4711,27 +4732,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -4741,6 +4767,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -6043,6 +6071,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled
|
||||
on the controller manager for the additional provider deployment.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -7574,6 +7609,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled on
|
||||
the controller manager for the provider.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -7786,27 +7828,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -7816,6 +7863,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -9450,27 +9499,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -9480,6 +9534,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -10783,6 +10839,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled
|
||||
on the controller manager for the additional provider deployment.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -12314,6 +12377,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled on
|
||||
the controller manager for the provider.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -12527,27 +12597,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -12557,6 +12632,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -14190,27 +14267,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -14220,6 +14302,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -15522,6 +15606,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled
|
||||
on the controller manager for the additional provider deployment.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -17053,6 +17144,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled on
|
||||
the controller manager for the provider.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -17265,27 +17363,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -17295,6 +17398,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -18929,27 +19034,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -18959,6 +19069,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -20262,6 +20374,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled
|
||||
on the controller manager for the additional provider deployment.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -21793,6 +21912,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled on
|
||||
the controller manager for the provider.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -22006,27 +22132,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -22036,6 +22167,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -23371,6 +23504,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled
|
||||
on the controller manager for the additional provider deployment.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -24902,6 +25042,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled on
|
||||
the controller manager for the provider.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -25114,27 +25261,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -25144,6 +25296,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
@@ -26481,6 +26635,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled
|
||||
on the controller manager for the additional provider deployment.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -28012,6 +28173,13 @@ spec:
|
||||
description: Manager defines the properties that can be enabled on
|
||||
the controller manager for the provider.
|
||||
properties:
|
||||
additionalArgs:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
AdditionalArgs is a map of additional options that will be passed
|
||||
in as container args to the provider's controller manager.
|
||||
type: object
|
||||
cacheNamespace:
|
||||
description: |-
|
||||
CacheNamespace if specified restricts the manager's cache to watch objects in
|
||||
@@ -28225,27 +28393,32 @@ spec:
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
Last time the condition transitioned from one status to another.
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when
|
||||
the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
A human readable message indicating details about the transition.
|
||||
message is a human readable message indicating details about the transition.
|
||||
This field may be empty.
|
||||
maxLength: 10240
|
||||
minLength: 1
|
||||
type: string
|
||||
reason:
|
||||
description: |-
|
||||
The reason for the condition's last transition in CamelCase.
|
||||
reason is the reason for the condition's last transition in CamelCase.
|
||||
The specific API may choose whether or not this field is considered a guaranteed API.
|
||||
This field may be empty.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
severity:
|
||||
description: |-
|
||||
severity provides an explicit classification of Reason code, so the users or machines can immediately
|
||||
understand the current situation and act accordingly.
|
||||
The Severity field MUST be set only when Status=False.
|
||||
maxLength: 32
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
@@ -28255,6 +28428,8 @@ spec:
|
||||
type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
|
||||
can be useful (see .node.status.conditions), the ability to deconflict is important.
|
||||
maxLength: 256
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
|
||||
@@ -0,0 +1,47 @@
{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "type": "object",
  "properties": {
    "core": {
      "oneOf": [
        { "type": "object" },
        { "type": "null" }
      ]
    },
    "bootstrap": {
      "type": "object",
      "oneOf": [
        { "type": "object" },
        { "type": "null" }
      ]
    },
    "controlPlane": {
      "type": "object",
      "oneOf": [
        { "type": "object" },
        { "type": "null" }
      ]
    },
    "infrastructure": {
      "type": "object",
      "oneOf": [
        { "type": "object" },
        { "type": "null" }
      ]
    },
    "addon": {
      "type": "object",
      "oneOf": [
        { "type": "object" },
        { "type": "null" }
      ]
    },
    "ipam": {
      "type": "object",
      "oneOf": [
        { "type": "object" },
        { "type": "null" }
      ]
    }
  }
}
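Note that only `core` omits the extra `"type": "object"` constraint, so strictly only `core` can also be set to null; the other provider keys must be maps. A small sketch of values this schema accepts (provider names are just examples):

core: null            # allowed: `core` is object-or-null
bootstrap: {}         # the remaining keys must be objects; an empty map is fine
infrastructure:
  kubevirt: {}        # provider entries are maps keyed by provider name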
@@ -1,12 +1,30 @@
---
# ---
# Cluster API provider options
core: ""
bootstrap: ""
controlPlane: ""
infrastructure: ""
ipam: ""
addon: ""
core: {}
# cluster-api: {}   # Name, required
# namespace: ""     # Optional
# version: ""       # Optional
bootstrap: {}
# kubeadm: {}       # Name, required
# namespace: ""     # Optional
# version: ""       # Optional
controlPlane: {}
# kubeadm: {}       # Name, required
# namespace: ""     # Optional
# version: ""       # Optional
infrastructure: {}
# docker: {}        # Name, required
# namespace: ""     # Optional
# version: ""       # Optional
addon: {}
# helm: {}          # Name, required
# namespace: ""     # Optional
# version: ""       # Optional
ipam: {}
# in-cluster: {}    # Name, required
# namespace: ""     # Optional
# version: ""       # Optional
manager.featureGates: {}
fetchConfig: {}
# ---
@@ -21,7 +39,7 @@ leaderElection:
image:
  manager:
    repository: registry.k8s.io/capi-operator/cluster-api-operator
    tag: v0.18.1
    tag: v0.19.0
    pullPolicy: IfNotPresent
env:
  manager: []
@@ -5,7 +5,7 @@ metadata:
  name: cluster-api
spec:
  # https://github.com/kubernetes-sigs/cluster-api
  version: v1.9.5
  version: v1.10.0
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: ControlPlaneProvider
@@ -13,7 +13,7 @@ metadata:
  name: kamaji
spec:
  # https://github.com/clastix/cluster-api-control-plane-provider-kamaji
  version: v0.14.1
  version: v0.14.2
  deployment:
    containers:
    - name: manager
@@ -31,7 +31,7 @@ metadata:
  name: kubeadm
spec:
  # https://github.com/kubernetes-sigs/cluster-api
  version: v1.9.5
  version: v1.10.0
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: InfrastructureProvider
@@ -39,4 +39,4 @@ metadata:
  name: kubevirt
spec:
  # https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt
  version: v0.1.9
  version: v0.1.10
Some files were not shown because too many files have changed in this diff.