mirror of https://github.com/outbackdingo/cozystack.git
synced 2026-01-29 18:19:00 +00:00

Compare commits: fix-versio...802-apps-a (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 2bf312260d |  |

53 .github/workflows/backport.yaml vendored
@@ -1,53 +0,0 @@
name: Automatic Backport

on:
  pull_request_target:
    types: [closed] # fires when PR is closed (merged)

concurrency:
  group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

permissions:
  contents: write
  pull-requests: write

jobs:
  backport:
    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'backport')
    runs-on: [self-hosted]

    steps:
      # 1. Decide which maintenance branch should receive the back-port
      - name: Determine target maintenance branch
        id: target
        uses: actions/github-script@v7
        with:
          script: |
            let rel;
            try {
              rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
            } catch (e) {
              core.setFailed('No existing releases found; cannot determine backport target.');
              return;
            }
            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
            const branch = `release-${maj}.${min}`;
            core.setOutput('branch', branch);
            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);

      # 2. Checkout (required by backport-action)
      - name: Checkout repository
        uses: actions/checkout@v4

      # 3. Create the back-port pull request
      - name: Create back-port PR
        uses: korthout/backport-action@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          label_pattern: '' # don't read labels for targets
          target_branches: ${{ steps.target.outputs.branch }}
11 .github/workflows/pre-commit.yml vendored

@@ -1,13 +1,12 @@
name: Pre-Commit Checks

on:
  push:
    branches:
      - main
  pull_request:
    types: [labeled, opened, synchronize, reopened]

concurrency:
  group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

    paths-ignore:
      - '**.md'
jobs:
  pre-commit:
    runs-on: ubuntu-22.04
161 .github/workflows/pull-requests-release.yaml vendored

@@ -4,10 +4,6 @@ on:
  pull_request:
    types: [labeled, opened, synchronize, reopened, closed]

concurrency:
  group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  verify:
    name: Test Release

@@ -16,8 +12,8 @@
    permissions:
      contents: read
      packages: write

    # Run only when the PR carries the "release" label and is not closed.
    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

@@ -43,137 +39,38 @@
    runs-on: [self-hosted]
    permissions:
      contents: write

    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      # Extract tag from branch name (branch = release-X.Y.Z*)
      - name: Extract tag from branch name
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }
            const tag = `v${m[1]}`;
            core.setOutput('tag', tag);
            console.log(`✅ Tag to publish: ${tag}`);
            const match = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);

      # Checkout repo & create / push annotated tag
            if (!match) {
              core.setFailed(`Branch '${branch}' does not match expected format 'release-X.Y.Z[-suffix]'`);
            } else {
              const tag = `v${match[1]}`;
              core.setOutput('tag', tag);
              console.log(`✅ Extracted tag: ${tag}`);
            }

      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create tag on merge commit

      - name: Create tag on merged commit
        run: |
          git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
          git push -f origin ${{ steps.get_tag.outputs.tag }}

      # Ensure maintenance branch release-X.Y
      - name: Ensure maintenance branch release-X.Y
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GH_PAT }}
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
            const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
            if (!match) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
              return;
            }
            const line = `${match[1]}.${match[2]}`;
            const branch = `release-${line}`;

            // Get main branch commit for the tag
            const ref = await github.rest.git.getRef({
              owner: context.repo.owner,
              repo: context.repo.repo,
              ref: `tags/${tag}`
            });

            const commitSha = ref.data.object.sha;

            try {
              await github.rest.repos.getBranch({
                owner: context.repo.owner,
                repo: context.repo.repo,
                branch
              });

              await github.rest.git.updateRef({
                owner: context.repo.owner,
                repo: context.repo.repo,
                ref: `heads/${branch}`,
                sha: commitSha,
                force: true
              });
              console.log(`🔁 Force-updated '${branch}' to ${commitSha}`);
            } catch (err) {
              if (err.status === 404) {
                await github.rest.git.createRef({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  ref: `refs/heads/${branch}`,
                  sha: commitSha
                });
                console.log(`✅ Created branch '${branch}' at ${commitSha}`);
              } else {
                console.error('Unexpected error --', err);
                core.setFailed(`Unexpected error creating/updating branch: ${err.message}`);
                throw err;
              }
            }

      # Get the latest published release
      - name: Get the latest published release
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare current tag vs latest using semver-utils
      - name: Semver compare
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.get_tag.outputs.tag }}
          compare-to: ${{ steps.latest_release.outputs.tag }}

      # Derive flags: prerelease? make_latest?
      - name: Calculate publish flags
        id: flags
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
            const m = tag.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
            if (!m) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
            const isRc = Boolean(m[2]);
            core.setOutput('is_rc', isRc);
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');

      # Publish draft release with correct flags
          git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }} --force
          git push origin ${{ steps.get_tag.outputs.tag }} --force

      - name: Publish draft release
        uses: actions/github-script@v7
        with:

@@ -181,17 +78,19 @@
          const tag = '${{ steps.get_tag.outputs.tag }}';
          const releases = await github.rest.repos.listReleases({
            owner: context.repo.owner,
            repo: context.repo.repo
          });
          const draft = releases.data.find(r => r.tag_name === tag && r.draft);
          if (!draft) throw new Error(`Draft release for ${tag} not found`);

          const release = releases.data.find(r => r.tag_name === tag && r.draft);
          if (!release) {
            throw new Error(`Draft release with tag ${tag} not found`);
          }

          await github.rest.repos.updateRelease({
            owner: context.repo.owner,
            repo: context.repo.repo,
            release_id: draft.id,
            draft: false,
            prerelease: ${{ steps.flags.outputs.is_rc }},
            make_latest: '${{ steps.flags.outputs.make_latest }}'
            release_id: release.id,
            draft: false
          });

          console.log(`🚀 Published release for ${tag}`);
          console.log(`✅ Published release for ${tag}`);
16 .github/workflows/pull-requests.yaml vendored

@@ -4,10 +4,6 @@ on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

concurrency:
  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  e2e:
    name: Build and Test

@@ -16,8 +12,8 @@ jobs:
    permissions:
      contents: read
      packages: write

    # Never run when the PR carries the "release" label.
    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:

@@ -34,8 +30,10 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Build
        run: make build
      - name: make build
        run: |
          make build

      - name: Test
        run: make test
      - name: make test
        run: |
          make test
235 .github/workflows/tags.yaml vendored

@@ -1,16 +1,10 @@
name: Versioned Tag

on:
  # Trigger on push if it includes a tag like vX.Y.Z
  push:
    tags:
      - 'v*.*.*'         # vX.Y.Z
      - 'v*.*.*-rc.*'    # vX.Y.Z-rc.N
      - 'v*.*.*-beta.*'  # vX.Y.Z-beta.N
      - 'v*.*.*-alpha.*' # vX.Y.Z-alpha.N

concurrency:
  group: tags-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
      - 'v*.*.*'

jobs:
  prepare-release:

@@ -20,10 +14,9 @@ jobs:
      contents: write
      packages: write
      pull-requests: write
      actions: write

    steps:
      # Check if a non-draft release with this tag already exists
      # 1) Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7

@@ -32,67 +25,57 @@ jobs:
          const tag = context.ref.replace('refs/tags/', '');
          const releases = await github.rest.repos.listReleases({
            owner: context.repo.owner,
            repo: context.repo.repo
          });
          const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
          core.setOutput('skip', exists);
          console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);
          const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
          if (existing) {
            core.setOutput('skip', 'true');
          } else {
            core.setOutput('skip', 'false');
          }

      # If a published release already exists, skip the rest of the workflow
      - name: Skip if release already exists
        if: steps.check_release.outputs.skip == 'true'
        run: echo "Release already exists, skipping workflow."

      # Parse tag meta-data (rc?, maintenance line, etc.)
      - name: Parse tag
        if: steps.check_release.outputs.skip == 'false'
        id: tag
        uses: actions/github-script@v7
        with:
          script: |
            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
            const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/); // ['0.31.5', '-rc.1' | '-beta.1' | …]
            if (!m) {
              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
            const isRc = Boolean(m[2]);
            const [maj, min] = m[1].split('.');
            core.setOutput('tag', ref);              // v0.31.5-rc.1
            core.setOutput('version', version);      // 0.31.5-rc.1
            core.setOutput('is_rc', isRc);           // true
            core.setOutput('line', `${maj}.${min}`); // 0.31

      # Detect base branch (main or release-X.Y) the tag was pushed from
      # 2) Determine the base branch from which the tag was pushed
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
        uses: actions/github-script@v7
        with:
          script: |
            /*
              For a push event with a tag, GitHub sets context.payload.base_ref
              if the tag was pushed from a branch.
              If it's empty, we can't determine the correct base branch and must fail.
            */
            const baseRef = context.payload.base_ref;
            if (!baseRef) {
              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
              core.setFailed(`❌ base_ref is empty. Make sure you push the tag from a branch (e.g. 'git push origin HEAD:refs/tags/vX.Y.Z').`);
              return;
            }
            const branch = baseRef.replace('refs/heads/', '');
            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
            if (!ok) {
              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
              return;
            }
            core.setOutput('branch', branch);

      # Checkout & login once
            const shortBranch = baseRef.replace("refs/heads/", "");
            const releasePattern = /^release-\d+\.\d+$/;
            if (shortBranch !== "main" && !releasePattern.test(shortBranch)) {
              core.setFailed(`❌ Tagged commit must belong to branch 'main' or 'release-X.Y'. Got '${shortBranch}'`);
              return;
            }

            core.setOutput('branch', shortBranch);

      # 3) Checkout full git history and tags
      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GHCR
      # 4) Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:

@@ -100,129 +83,113 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      # Build project artifacts
      # 5) Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build

      # Commit built artifacts
      # 6) Optionally commit built artifacts to the repository
      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        env:
          GIT_AUTHOR_NAME: ${{ github.actor }}
          GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
          git push origin HEAD || true

      # Get `latest_version` from latest published release
      - name: Get latest published release
        if: steps.check_release.outputs.skip == 'false'
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare tag (A) with latest (B)
      - name: Semver compare
        if: steps.check_release.outputs.skip == 'false'
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.tag.outputs.tag }}               # A
          compare-to: ${{ steps.latest_release.outputs.tag }} # B

      # Create or reuse DRAFT GitHub Release
      - name: Create / reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.tag.outputs.tag }}';
            const isRc = ${{ steps.tag.outputs.is_rc }};
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            const makeLatest = outdated ? false : 'legacy';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            let rel = releases.data.find(r => r.tag_name === tag);
            if (!rel) {
              rel = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: tag,
                draft: true,
                prerelease: isRc,
                make_latest: makeLatest
              });
              console.log(`Draft release created for ${tag}`);
            } else {
              console.log(`Re-using existing release ${tag}`);
            }
            core.setOutput('upload_url', rel.upload_url);

      # Build + upload assets (optional)
      - name: Build & upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: |
          make assets
          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Create release-X.Y.Z branch and push (force-update)
      # 7) Create a release branch like release-X.Y.Z
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
          BRANCH="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH"
          git push -f origin "$BRANCH"
          BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH_NAME"
          git push origin "$BRANCH_NAME" --force

      # Create pull request into original base branch (if absent)
      # 8) Create a pull request from release-X.Y.Z to the original base branch
      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              base
            });

            if (prs.data.length === 0) {
              const pr = await github.rest.pulls.create({
              const newPr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                head,
                base,
                title: `Release v${version}`,
                body: `This PR prepares the release \`v${version}\`.`,
                body:
                  `This PR prepares the release \`v${version}\`.\n` +
                  `(Please merge it before releasing draft)`,
                draft: false
              });

              console.log(`Created pull request #${newPr.data.number} from ${head} to ${base}`);
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: pr.data.number,
                issue_number: newPr.data.number,
                labels: ['release']
              });
              console.log(`Created PR #${pr.data.number}`);
            } else {
              console.log(`PR already exists from ${head} to ${base}`);
              console.log(`Pull request already exists from ${head} to ${base}`);
            }

      # 9) Create or reuse an existing draft GitHub release for this tag
      - name: Create or reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: create_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            let release = releases.data.find(r => r.tag_name === tag);
            if (!release) {
              release = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: `${tag}`,
                draft: true,
                prerelease: false
              });
            }
            core.setOutput('upload_url', release.upload_url);

      # 10) Build additional assets for the release (if needed)
      - name: Build assets
        if: steps.check_release.outputs.skip == 'false'
        run: make assets
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # 11) Upload assets to the draft release
      - name: Upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # 12) Run tests
      - name: Run tests
        if: steps.check_release.outputs.skip == 'false'
        run: make test
3 .gitignore vendored

@@ -1,7 +1,6 @@
_out
.git
.idea
.vscode

# User-specific stuff
.idea/**/workspace.xml

@@ -76,4 +75,4 @@ fabric.properties
.idea/caches/build_file_checksums.ser

.DS_Store
**/.DS_Store

@@ -18,7 +18,6 @@ repos:
        (cd "$dir" && make generate)
      fi
    done
    git diff --color=always | cat
  '
  language: script
  files: ^.*$
2 Makefile

@@ -20,7 +20,6 @@ build: build-deps
	make -C packages/system/kubeovn image
	make -C packages/system/kubeovn-webhook image
	make -C packages/system/dashboard image
	make -C packages/system/metallb image
	make -C packages/system/kamaji image
	make -C packages/system/bucket image
	make -C packages/core/testing image

@@ -48,6 +47,7 @@ assets:
test:
	make -C packages/core/testing apply
	make -C packages/core/testing test
	#make -C packages/core/testing test-applications

generate:
	hack/update-codegen.sh
@@ -39,8 +39,6 @@ import (
	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	"github.com/cozystack/cozystack/internal/controller"
	"github.com/cozystack/cozystack/internal/telemetry"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	// +kubebuilder:scaffold:imports
)

@@ -53,7 +51,6 @@ func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
	utilruntime.Must(helmv2.AddToScheme(scheme))
	// +kubebuilder:scaffold:scheme
}

@@ -185,14 +182,6 @@ func main() {
	if err = (&controller.WorkloadReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
		os.Exit(1)
	}

	if err = (&controller.TenantHelmReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "Workload")
		os.Exit(1)
@@ -1,97 +0,0 @@
This is the third release candidate for the upcoming Cozystack v0.31.0 release.
The release notes show changes accumulated since the release of the previous version, Cozystack v0.30.0.

Cozystack 0.31.0 further advances GPU support, monitoring, and all-around convenience features.

## New Features and Changes

* [kubernetes] Introduce GPU support for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/834)
* Add VerticalPodAutoscaler to a few more components:
    * [kubernetes] Kubernetes clusters in user tenants. (@klinch0 in https://github.com/cozystack/cozystack/pull/806)
    * [platform] Cozystack dashboard. (@klinch0 in https://github.com/cozystack/cozystack/pull/828)
    * [platform] Cozystack etcd-operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/850)
* Introduce support for cross-architecture builds and Cozystack on ARM:
    * [build] Refactor Makefiles, introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
    * [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
* [platform] Introduce a new controller to synchronize tenant HelmReleases and propagate configuration changes. (@klinch0 in https://github.com/cozystack/cozystack/pull/870)
* [platform] Introduce options `expose-services`, `expose-ingress`, and `expose-external-ips` to the ingress service. (@kvaps in https://github.com/cozystack/cozystack/pull/929)
* [kubevirt] Enable exporting VMs. (@kvaps in https://github.com/cozystack/cozystack/pull/808)
* [kubevirt] Make KubeVirt's CPU allocation ratio configurable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
* [virtual-machine] Add support for various storages. (@kvaps in https://github.com/cozystack/cozystack/pull/974)
* [cozystack-controller] Record the IP address pool and storage class in Workload objects. (@lllamnyp in https://github.com/cozystack/cozystack/pull/831)
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
* [apps] Remove user-facing config of limits and requests. (@lllamnyp in https://github.com/cozystack/cozystack/pull/935)
* Update the Cozystack release policy to include long-lived release branches and start with release candidates. Update CI workflows and docs accordingly:
    * Use release branches `release-X.Y` for gathering and releasing fixes after the initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
    * Automatically create release branches after the initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
    * Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
    * Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
    * Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
    * Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)

## Fixes

* [virtual-machine] Add GPU names to the virtual machine specifications. (@kvaps in https://github.com/cozystack/cozystack/pull/862)
* [virtual-machine] Count Workload resources for pods by requests, not limits. Other improvements to VM resource tracking. (@lllamnyp in https://github.com/cozystack/cozystack/pull/904)
* [platform] Fix installing HelmReleases on initial setup. (@kvaps in https://github.com/cozystack/cozystack/pull/833)
* [platform] Migration scripts update the Kubernetes ConfigMap with the current stack version for improved version tracking. (@klinch0 in https://github.com/cozystack/cozystack/pull/840)
* [platform] Reduce requested CPU and RAM for the `kamaji` provider. (@klinch0 in https://github.com/cozystack/cozystack/pull/825)
* [platform] Improve the reconciliation loop for the Cozystack system HelmReleases logic. (@klinch0 in https://github.com/cozystack/cozystack/pull/809 and https://github.com/cozystack/cozystack/pull/810, @kvaps in https://github.com/cozystack/cozystack/pull/811)
* [platform] Remove extra dependencies for the Piraeus operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/856)
* [platform] Refactor dashboard values. (@kvaps in https://github.com/cozystack/cozystack/pull/928, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/952)
* [platform] Make the FluxCD artifact disabled by default. (@klinch0 in https://github.com/cozystack/cozystack/pull/964)
* [kubernetes] Update garbage collection of HelmReleases in tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/835)
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)
* [kubernetes] Fix the `ubuntu-container-disk` tag. (@kvaps in https://github.com/cozystack/cozystack/pull/887)
* [kubernetes] Refactor Helm manifests for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/866)
* [kubernetes] Fix the Ingress-NGINX dependency on Cert-Manager. (@kvaps in https://github.com/cozystack/cozystack/pull/976)
* [tenant] Fix an issue with accessing external IPs of a cluster from the cluster itself. (@kvaps in https://github.com/cozystack/cozystack/pull/854)
* [cluster-api] Remove the no longer necessary workaround for Kamaji. (@kvaps in https://github.com/cozystack/cozystack/pull/867, patched in https://github.com/cozystack/cozystack/pull/956)
* [monitoring] Remove the legacy label "POD" from the exclude filter in metrics. (@xy2 in https://github.com/cozystack/cozystack/pull/826)
* [monitoring] Refactor the management etcd monitoring config. Introduce a migration script for updating monitoring resources (`kube-rbac-proxy` daemonset). (@lllamnyp in https://github.com/cozystack/cozystack/pull/799 and https://github.com/cozystack/cozystack/pull/830)
* [monitoring] Fix VerticalPodAutoscaler resource allocation for VMagent. (@klinch0 in https://github.com/cozystack/cozystack/pull/820)
* [postgres] Remove the duplicated `template` entry from the backup manifest. (@etoshutka in https://github.com/cozystack/cozystack/pull/872)
* [kube-ovn] Fix the versions mapping in the Makefile. (@kvaps in https://github.com/cozystack/cozystack/pull/883)
* [dx] Automatically detect the version for migrations in installer.sh. (@kvaps in https://github.com/cozystack/cozystack/pull/837)
* [e2e] Increase timeout durations for `capi` and `keycloak` to improve reliability during environment setup. (@kvaps in https://github.com/cozystack/cozystack/pull/858)
* [e2e] Fix the `device_ownership_from_security_context` CRI setting. (@dtrdnk in https://github.com/cozystack/cozystack/pull/896)
* [e2e] Return `genisoimage` to the e2e-test Dockerfile. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/962)
* [ci] Improve the check for `versions_map` running on pull requests. (@kvaps and @klinch0 in https://github.com/cozystack/cozystack/pull/836, https://github.com/cozystack/cozystack/pull/842, and https://github.com/cozystack/cozystack/pull/845)
* [ci] If the release step was skipped on a tag, skip tests as well. (@kvaps in https://github.com/cozystack/cozystack/pull/822)
* [ci] Allow CI to cancel the previous job if a new one is scheduled. (@kvaps in https://github.com/cozystack/cozystack/pull/873)
* [ci] Use the correct version name when uploading build assets to the release page. (@kvaps in https://github.com/cozystack/cozystack/pull/876)
* [ci] Stop using the `ok-to-test` label to trigger CI in pull requests. (@kvaps in https://github.com/cozystack/cozystack/pull/875)
* [ci] Do not run tests in the release building pipeline. (@kvaps in https://github.com/cozystack/cozystack/pull/882)
* [ci] Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* [ci, dx] Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* [ci] Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* [ci] Force-update the release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* [docs] Explain that tenants cannot have dashes in their names. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/980)

## Dependencies

* MetalLB is now included directly as a patched image based on version 0.14.9. (@lllamnyp in https://github.com/cozystack/cozystack/pull/945)
* Update Kubernetes to v1.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/949)
* Update Talos Linux to v1.10.1. (@kvaps in https://github.com/cozystack/cozystack/pull/931)
* Update Cilium to v1.17.3. (@kvaps in https://github.com/cozystack/cozystack/pull/848)
* Update LINSTOR to v1.31.0. (@kvaps in https://github.com/cozystack/cozystack/pull/846)
* Update Kube-OVN to v1.13.11. (@kvaps in https://github.com/cozystack/cozystack/pull/847, @lllamnyp in https://github.com/cozystack/cozystack/pull/922)
* Update tenant Kubernetes to v1.32. (@kvaps in https://github.com/cozystack/cozystack/pull/871)
* Update flux-operator to 0.20.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/880 and https://github.com/cozystack/cozystack/pull/934)
* Update multiple Cluster API components. (@kvaps in https://github.com/cozystack/cozystack/pull/867 and https://github.com/cozystack/cozystack/pull/947)
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953, fixed by @nbykov0 in https://github.com/cozystack/cozystack/pull/983)
* Update cert-manager to v1.17.2. (@kvaps in https://github.com/cozystack/cozystack/pull/975)

## Maintenance

* Add @klinch0 to CODEOWNERS. (@kvaps in https://github.com/cozystack/cozystack/pull/838)

## New Contributors

* @etoshutka made their first contribution in https://github.com/cozystack/cozystack/pull/872
* @dtrdnk made their first contribution in https://github.com/cozystack/cozystack/pull/896
* @zdenekjanda made their first contribution in https://github.com/cozystack/cozystack/pull/924
* @gwynbleidd2106 made their first contribution in https://github.com/cozystack/cozystack/pull/962

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0-rc.3
@@ -1,37 +1,10 @@
# Release Workflow

This document describes Cozystack’s release process.

## Introduction

Cozystack uses a staged release process to ensure stability and flexibility during development.

There are three types of releases:

- **Release Candidates (RC)** – Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
- **Regular Releases** – Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
- **Patch Releases** – Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.

Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.

## Release Candidates

Release candidates are Cozystack versions that introduce new features and are published before a stable release.
Their purpose is to help validate stability before finalizing a new feature release.
They allow for final rounds of testing and bug fixes without freezing development.

Release candidates are given numbers `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
They are created directly in the `main` branch.
An RC is typically tagged when all major features for the upcoming release have been merged into `main` and the release enters its testing phase.
However, new features and changes can still be added before the regular release `vX.Y.0`.

Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in `main`.
This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.
This section explains how Cozystack builds and releases are made.
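
As an illustration, a minimal sketch of how an RC tag could be created and pushed, using the `v0.42.0` example version. The `Versioned Tag` workflow fails when `base_ref` is empty, so the tag must be pushed from a branch:

```sh
# On main, tag the release candidate (example version number)
git checkout main
git pull origin main
git tag v0.42.0-rc.1

# Push the tag from the current branch so GitHub populates base_ref,
# which the workflow uses to detect the base branch (main or release-X.Y)
git push origin HEAD:refs/tags/v0.42.0-rc.1
```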

## Regular Releases

When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
117 hack/cozytest.sh

@@ -1,117 +0,0 @@
#!/bin/sh
###############################################################################
# cozytest.sh - Bats-compatible test runner with live trace and enhanced     #
# output, written in pure shell                                              #
###############################################################################
set -eu

TEST_FILE=${1:?Usage: ./cozytest.sh <file.bats> [pattern]}
PATTERN=${2:-*}
LINE='----------------------------------------------------------------'

cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
BEGIN=$(date +%s)
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }

###############################################################################
# run_one <fn> <title>                                                        #
###############################################################################
run_one() {
  fn=$1 title=$2
  tmp=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
  log="$tmp/log"

  echo "╭ » Run test: $title"
  START=$(date +%s)
  skip_next="+ $fn" # skip the first trace line, which holds the function name

  {
    (
      PS4='+ '   # prefix for set -x
      set -eu -x # strict + trace
      "$fn"
    )
    printf '__RC__%s\n' "$?"
  } 2>&1 | tee "$log" | while IFS= read -r line; do
    case "$line" in
      '__RC__'*) : ;;
      '+ '*) cmd=${line#'+ '}
             [ "$cmd" = "${skip_next#+ }" ] && continue
             case "$cmd" in
               'set -e'|'set -x'|'set -u'|'return 0') continue ;;
             esac
             out=$cmd ;;
      *) out=$line ;;
    esac
    now=$(( $(date +%s) - START ))
    [ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
    printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
  done

  rc=$(awk '/^__RC__/ {print substr($0,7)}' "$log" | tail -n1)
  [ -z "$rc" ] && rc=1
  now=$(( $(date +%s) - START ))

  if [ "$rc" -eq 0 ]; then
    printf '╰[%02d:%02d] ✅ Test OK: %s\n' $((now/60)) $((now%60)) "$title"
  else
    printf '╰[%02d:%02d] ❌ Test failed: %s (exit %s)\n' \
      $((now/60)) $((now%60)) "$title" "$rc"
    echo "----- captured output -----------------------------------------"
    grep -v '^__RC__' "$log"
    echo "$LINE"
    exit "$rc"
  fi

  rm -rf "$tmp"
}

###############################################################################
# convert .bats -> shell functions                                            #
###############################################################################
TMP_SH=$(mktemp) || { echo "Failed to create temp file" >&2; exit 1; }
trap 'rm -f "$TMP_SH"' EXIT
awk '
  /^@test[[:space:]]+"/ {
    line = substr($0, index($0, "\"") + 1)
    title = substr(line, 1, index(line, "\"") - 1)
    fname = "test_"
    for (i = 1; i <= length(title); i++) {
      c = substr(title, i, 1)
      fname = fname (c ~ /[A-Za-z0-9]/ ? c : "_")
    }
    printf("### %s\n", title)
    printf("%s() {\n", fname)
    print "  set -e"   # any error fails the test
    next
  }
  /^}$/ {
    print "  return 0" # if the author did not call exit 1, the test passes
    print "}"
    next
  }
  { print }
' "$TEST_FILE" > "$TMP_SH"

[ -f "$TMP_SH" ] || { echo "Failed to generate test functions" >&2; exit 1; }
# shellcheck disable=SC1090
. "$TMP_SH"

###############################################################################
# run selected tests                                                          #
###############################################################################
awk -v pat="$PATTERN" '
  /^### / {
    title = substr($0, 5)
    name = "test_"
    for (i = 1; i <= length(title); i++) {
      c = substr(title, i, 1)
      name = name (c ~ /[A-Za-z0-9]/ ? c : "_")
    }
    if (pat == "*" || index(title, pat) > 0)
      printf("%s %s\n", name, title)
  }
' "$TMP_SH" | while IFS=' ' read -r fn title; do
  run_one "$fn" "$title"
done
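
For reference, a brief sketch of how this runner is invoked, following the usage string above (the test file path is illustrative, not a path confirmed by this diff):

```sh
# Run every test in a Bats file with live trace output
./hack/cozytest.sh packages/core/testing/e2e.bats

# Run only the tests whose titles contain the substring "Talos"
./hack/cozytest.sh packages/core/testing/e2e.bats Talos
```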
@@ -1,94 +0,0 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Create tenant with isolated mode enabled" {
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}

@test "Create a tenant Kubernetes control plane" {
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: false
      valuesOverride: {}
    cilium:
      valuesOverride: {}
    fluxcd:
      enabled: false
      valuesOverride: {}
    gatewayAPI:
      enabled: false
    gpuOperator:
      enabled: false
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts: []
      valuesOverride: {}
    monitoringAgents:
      enabled: false
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
  controlPlane:
    apiServer:
      resources: {}
      resourcesPreset: small
    controllerManager:
      resources: {}
      resourcesPreset: micro
    konnectivity:
      server:
        resources: {}
        resourcesPreset: micro
    replicas: 2
    scheduler:
      resources: {}
      resourcesPreset: micro
  host: ""
  nodeGroups:
    md0:
      ephemeralStorage: 20Gi
      gpus: []
      instanceType: u1.medium
      maxReplicas: 10
      minReplicas: 0
      resources:
        cpu: ""
        memory: ""
      roles:
        - ingress-nginx
      storageClass: replicated
EOF
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=5m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
}
@@ -1,387 +0,0 @@
|
||||
#!/usr/bin/env bats
|
||||
# -----------------------------------------------------------------------------
|
||||
# Cozystack end‑to‑end provisioning test (Bats)
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@test "Environment variable COZYSTACK_INSTALLER_YAML is defined" {
|
||||
if [ -z "${COZYSTACK_INSTALLER_YAML:-}" ]; then
|
||||
echo 'COZYSTACK_INSTALLER_YAML environment variable is not set!' >&2
|
||||
echo >&2
|
||||
echo 'Please export it with the following command:' >&2
|
||||
echo ' export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "IPv4 forwarding is enabled" {
|
||||
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
|
||||
echo "IPv4 forwarding is disabled!" >&2
|
||||
echo >&2
|
||||
echo "Enable it with:" >&2
|
||||
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Clean previous VMs" {
|
||||
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
|
||||
rm -rf srv1 srv2 srv3
|
||||
}
|
||||
|
||||
@test "Prepare networking and masquerading" {
|
||||
ip link del cozy-br0 2>/dev/null || true
|
||||
ip link add cozy-br0 type bridge
|
||||
ip link set cozy-br0 up
|
||||
ip address add 192.168.123.1/24 dev cozy-br0
|
||||
|
||||
# Masquerading rule – idempotent (delete first, then add)
|
||||
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
|
||||
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
|
||||
}
|
||||
|
||||
@test "Prepare cloud‑init drive for VMs" {
|
||||
mkdir -p srv1 srv2 srv3
|
||||
|
||||
# Generate cloud‑init ISOs
|
||||
for i in 1 2 3; do
|
||||
echo "hostname: srv${i}" > "srv${i}/meta-data"
|
||||
|
||||
cat > "srv${i}/user-data" <<'EOF'
|
||||
#cloud-config
|
||||
EOF
|
||||
|
||||
cat > "srv${i}/network-config" <<EOF
|
||||
version: 2
|
||||
ethernets:
|
||||
eth0:
|
||||
dhcp4: false
|
||||
addresses:
|
||||
- "192.168.123.1${i}/26"
|
||||
gateway4: "192.168.123.1"
|
||||
nameservers:
|
||||
search: [cluster.local]
|
||||
addresses: [8.8.8.8]
|
||||
EOF
|
||||
|
||||
( cd "srv${i}" && genisoimage \
|
||||
-output seed.img \
|
||||
-volid cidata -rational-rock -joliet \
|
||||
user-data meta-data network-config )
|
||||
done
|
||||
}
|
||||
|
||||
@test "Download Talos NoCloud image" {
|
||||
if [ ! -f nocloud-amd64.raw ]; then
|
||||
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
|
||||
-O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
|
||||
rm -f nocloud-amd64.raw
|
||||
xz --decompress nocloud-amd64.raw.xz
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Prepare VM disks" {
|
||||
for i in 1 2 3; do
|
||||
cp nocloud-amd64.raw srv${i}/system.img
|
||||
qemu-img resize srv${i}/system.img 20G
|
||||
qemu-img create srv${i}/data.img 100G
|
||||
done
|
||||
}
|
||||
|
||||
@test "Create tap devices" {
|
||||
for i in 1 2 3; do
|
||||
ip link del cozy-srv${i} 2>/dev/null || true
|
||||
ip tuntap add dev cozy-srv${i} mode tap
|
||||
ip link set cozy-srv${i} up
|
||||
ip link set cozy-srv${i} master cozy-br0
|
||||
done
|
||||
}
|
||||
|
||||
@test "Boot QEMU VMs" {
|
||||
for i in 1 2 3; do
|
||||
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
|
||||
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
|
||||
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
|
||||
-drive file=srv${i}/system.img,if=virtio,format=raw \
|
||||
-drive file=srv${i}/seed.img,if=virtio,format=raw \
|
||||
-drive file=srv${i}/data.img,if=virtio,format=raw \
|
||||
-display none -daemonize -pidfile srv${i}/qemu.pid
|
||||
done
|
||||
|
||||
# Give qemu a few seconds to start up networking
|
||||
sleep 5
|
||||
}
|
||||
|
||||
@test "Wait until Talos API port 50000 is reachable on all machines" {
|
||||
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Generate Talos cluster configuration" {
|
||||
# Cluster‑wide patches
|
||||
cat > patch.yaml <<'EOF'
|
||||
machine:
|
||||
kubelet:
|
||||
nodeIP:
|
||||
validSubnets:
|
||||
- 192.168.123.0/24
|
||||
extraConfig:
|
||||
maxPods: 512
|
||||
kernel:
|
||||
modules:
|
||||
- name: openvswitch
|
||||
- name: drbd
|
||||
parameters:
|
||||
- usermode_helper=disabled
|
||||
- name: zfs
|
||||
- name: spl
|
||||
registries:
|
||||
mirrors:
|
||||
docker.io:
|
||||
endpoints:
|
||||
- https://mirror.gcr.io
|
||||
files:
|
||||
- content: |
|
||||
[plugins]
|
||||
[plugins."io.containerd.cri.v1.runtime"]
|
||||
device_ownership_from_security_context = true
|
||||
path: /etc/cri/conf.d/20-customization.part
|
||||
op: create
|
||||
|
||||
cluster:
|
||||
apiServer:
|
||||
extraArgs:
|
||||
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
||||
oidc-client-id: "kubernetes"
|
||||
oidc-username-claim: "preferred_username"
|
||||
oidc-groups-claim: "groups"
|
||||
network:
|
||||
cni:
|
||||
name: none
|
||||
dnsDomain: cozy.local
|
||||
podSubnets:
|
||||
- 10.244.0.0/16
|
||||
serviceSubnets:
|
||||
- 10.96.0.0/16
|
||||
EOF
|
||||
|
||||
# Control‑plane‑only patches
|
||||
cat > patch-controlplane.yaml <<'EOF'
|
||||
machine:
|
||||
nodeLabels:
|
||||
node.kubernetes.io/exclude-from-external-load-balancers:
|
||||
$patch: delete
|
||||
network:
|
||||
interfaces:
|
||||
- interface: eth0
|
||||
vip:
|
||||
ip: 192.168.123.10
|
||||
cluster:
|
||||
allowSchedulingOnControlPlanes: true
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
scheduler:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
apiServer:
|
||||
certSANs:
|
||||
- 127.0.0.1
|
||||
proxy:
|
||||
disabled: true
|
||||
discovery:
|
||||
enabled: false
|
||||
etcd:
|
||||
advertisedSubnets:
|
||||
- 192.168.123.0/24
|
||||
EOF
|
||||
|
||||
# Generate secrets once
|
||||
if [ ! -f secrets.yaml ]; then
|
||||
talosctl gen secrets
|
||||
fi
|
||||
|
||||
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
||||
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
|
||||
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
||||
}
|
||||
|
||||
@test "Apply Talos configuration to the node" {
|
||||
# Apply the configuration to all three nodes
|
||||
for node in 11 12 13; do
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
|
||||
done
|
||||
|
||||
# Wait for Talos services to come up again
|
||||
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Bootstrap Talos cluster" {
|
||||
# Bootstrap etcd on the first node
|
||||
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
||||
|
||||
# Wait until etcd is healthy
|
||||
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
|
||||
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
|
||||
|
||||
# Retrieve kubeconfig
|
||||
rm -f kubeconfig
|
||||
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
||||
|
||||
# Wait until all three nodes register in Kubernetes
|
||||
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Install Cozystack" {
|
||||
# Create namespace & configmap required by installer
|
||||
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
|
||||
kubectl create configmap cozystack -n cozy-system \
|
||||
--from-literal=bundle-name=paas-full \
|
||||
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
|
||||
--from-literal=ipv4-pod-gateway=10.244.0.1 \
|
||||
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
|
||||
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
|
||||
--from-literal=root-host=example.org \
|
||||
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
|
||||
--dry-run=client -o yaml | kubectl apply -f -
|
||||
|
||||
# Apply installer manifests from env variable
|
||||
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
|
||||
|
||||
# Wait for the installer deployment to become available
|
||||
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
|
||||
|
||||
# Wait until HelmReleases appear & reconcile them
|
||||
timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
|
||||
sleep 5
|
||||
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex

  # Fail the test if any HelmRelease is not Ready
  if kubectl get hr -A | grep -v " True " | grep -v NAME; then
    kubectl get hr -A
    fail "Some HelmReleases failed to reconcile"
  fi
}

@test "Wait for Cluster-API provider deployments" {
  timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
}

@test "Wait for LINSTOR and configure storage" {
  # LINSTOR controller and nodes
  kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
  timeout 60 sh -ec 'until [ "$(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online)" -eq 3 ]; do sleep 1; done'

  # Create a ZFS storage pool on each node
  for node in srv1 srv2 srv3; do
    kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs "${node}" /dev/vdc --pool-name data --storage-pool data
  done

  # Storage classes
  kubectl apply -f - <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOF
}

@test "Wait for MetalLB and configure address pool" {
  # MetalLB address pool
  kubectl apply -f - <<'EOF'
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools: [cozystack]
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses: [192.168.123.200-192.168.123.250]
  autoAssign: true
  avoidBuggyIPs: false
EOF
}

@test "Check Cozystack API service" {
  kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}

@test "Configure Tenant and wait for applications" {
  # Patch the root tenant and wait for its releases
  kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'

  timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready

  if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
    flux reconcile hr monitoring -n tenant-root --force
    kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
  fi

  # Expose Cozystack services through ingress
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'

  # NGINX ingress controller
  timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available

  # etcd statefulset
  kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m

  # VictoriaMetrics components
  kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m

  # Grafana
  kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
  kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m

  # Verify Grafana via ingress
  ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
    echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
    exit 1
  fi
}

@test "Keycloak OIDC stack is healthy" {
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'

  timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}

hack/e2e.application.sh (new executable file, 165 lines)
@@ -0,0 +1,165 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'

ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"

values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"

function delete_hr() {
  local release_name="$1"
  local namespace="$2"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ "$release_name" == "tenant-e2e" ]]; then
    echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
    return 0
  fi

  kubectl delete helmrelease "$release_name" -n "$namespace"
}

function install_helmrelease() {
  local release_name="$1"
  local namespace="$2"
  local chart_path="$3"
  local repo_name="$4"
  local repo_ns="$5"
  local values_file="$6"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$chart_path" ]]; then
    echo -e "${RED}Error: Chart path is required.${RESET}"
    exit 1
  fi

  # Render an optional values: section, indenting the values file to match the spec
  local values_section
  if [[ -n "$values_file" && -f "$values_file" ]]; then
    values_section=$(echo "  values:" && sed 's/^/    /' "$values_file")
  fi

  local helmrelease_file
  helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
  {
    echo "apiVersion: helm.toolkit.fluxcd.io/v2"
    echo "kind: HelmRelease"
    echo "metadata:"
    echo "  labels:"
    echo "    cozystack.io/ui: \"true\""
    echo "  name: \"$release_name\""
    echo "  namespace: \"$namespace\""
    echo "spec:"
    echo "  chart:"
    echo "    spec:"
    echo "      chart: \"$chart_path\""
    echo "      reconcileStrategy: Revision"
    echo "      sourceRef:"
    echo "        kind: HelmRepository"
    echo "        name: \"$repo_name\""
    echo "        namespace: \"$repo_ns\""
    echo "      version: '*'"
    echo "  interval: 1m0s"
    echo "  timeout: 5m0s"
    [[ -n "$values_section" ]] && echo "$values_section"
  } > "$helmrelease_file"

  kubectl apply -f "$helmrelease_file"

  rm -f "$helmrelease_file"
}
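
# Illustrative invocation of the helper above (the chart and values file named
# here are examples taken from this repository's testdata):
#   install_helmrelease nats-e2e tenant-e2e nats cozystack-apps cozy-public /hack/testdata/nats/values.yaml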

function install_tenant() {
  local release_name="$1"
  local namespace="$2"
  local values_file="${values_base_path}tenant/values.yaml"
  local repo_name="cozystack-apps"
  local repo_ns="cozy-public"
  install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}

function make_extra_checks() {
  local checks_file="$1"
  if [[ -n "$checks_file" && -f "$checks_file" ]]; then
    echo -e "${YELLOW}Running extra checks from file: ${checks_file}${RESET}"
    # The check scripts end with `return 0`, so they are sourced rather than executed
    source "$checks_file"
  fi
}

function check_helmrelease_status() {
  local release_name="$1"
  local namespace="$2"
  local checks_file="$3"
  local timeout=300  # Timeout in seconds
  local interval=5   # Interval between checks in seconds
  local elapsed=0

  while [[ $elapsed -lt $timeout ]]; do
    local status_output
    status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')

    if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
      echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
      make_extra_checks "$checks_file"
      delete_hr "$release_name" "$namespace"
      return 0
    elif [[ "$status_output" == "InstallFailed" ]]; then
      echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
      exit 1
    else
      echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
    fi

    sleep "$interval"
    elapsed=$((elapsed + interval))
  done

  echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
  exit 1
}

chart_name="$1"

if [ -z "$chart_name" ]; then
  echo -e "${RED}No chart name provided. Exiting...${RESET}"
  exit 1
fi

checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"

install_tenant "$TEST_TENANT" "$ROOT_NS"
check_helmrelease_status "$TEST_TENANT" "$ROOT_NS" "${checks_base_path}tenant/check.sh"

echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"

install_helmrelease "$release_name" "$TEST_TENANT" "$chart_name" "$repo_name" "$repo_ns" "$values_file"
check_helmrelease_status "$release_name" "$TEST_TENANT" "$checks_file"

hack/e2e.sh (new executable file, 361 lines)
@@ -0,0 +1,361 @@
#!/bin/bash
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
  echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
  echo 'please set it with the following command:' >&2
  echo >&2
  echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
  echo >&2
  exit 1
fi

if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
  echo "IPv4 forwarding is not enabled!" >&2
  echo 'please enable forwarding with the following command:' >&2
  echo >&2
  echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
  echo >&2
  exit 1
fi

set -x
set -e

kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid) || true

ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0

# Enable masquerading
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE

rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3

# Prepare cloud-init
for i in 1 2 3; do
  echo "hostname: srv$i" > "srv$i/meta-data"
  echo '#cloud-config' > "srv$i/user-data"
  cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1$i/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOT

  ( cd "srv$i" && genisoimage \
      -output seed.img \
      -volid cidata -rational-rock -joliet \
      user-data meta-data network-config
  )
done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi
for i in 1 2 3; do
  cp nocloud-amd64.raw "srv$i/system.img"
  qemu-img resize "srv$i/system.img" 20G
done

# Prepare data drives
for i in 1 2 3; do
  qemu-img create "srv$i/data.img" 100G
done

# Prepare networking
for i in 1 2 3; do
  ip link del cozy-srv$i || true
  ip tuntap add dev cozy-srv$i mode tap
  ip link set cozy-srv$i up
  ip link set cozy-srv$i master cozy-br0
done

# Start VMs
for i in 1 2 3; do
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
    -drive file=srv$i/data.img,if=virtio,format=raw \
    -display none -daemonize -pidfile srv$i/qemu.pid
done

sleep 5

# Wait for the VMs to start up
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
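# Port 50000 is the Talos API (apid) endpoint, so a successful TCP connect
# means a node has booted far enough to accept talosctl commands.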

cat > patch.yaml <<\EOT
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.grpc.v1.cri"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOT

cat > patch-controlplane.yaml <<\EOT
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOT
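
# The VIP 192.168.123.10 configured above is the shared control-plane address;
# the same endpoint is used for the cluster API server and for talosctl below.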

# Generate configuration
if [ ! -f secrets.yaml ]; then
  talosctl gen secrets
fi

rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig

# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i

# Wait for the VMs to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

# Bootstrap
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

# Wait for etcd
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
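# The inner `timeout -s 9 2` hard-kills each talosctl probe after two seconds,
# so a hung etcd query cannot stall the outer retry loop.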

rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig

# Wait for the Kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
kubectl create ns cozy-system --dry-run=client -o yaml | kubectl apply -f -
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  bundle-name: "paas-full"
  ipv4-pod-cidr: "10.244.0.0/16"
  ipv4-pod-gateway: "10.244.0.1"
  ipv4-svc-cidr: "10.96.0.0/16"
  ipv4-join-cidr: "100.64.0.0/16"
  root-host: example.org
  api-server-endpoint: https://192.168.123.10:6443
EOT

# Run the installer
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -

# Wait for the cozystack pod to start
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack

# Wait for HelmReleases to appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'

sleep 5

kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

# Wait for Cluster-API providers
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for the LINSTOR controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller

# Wait for all LINSTOR nodes to become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'

kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data

kubectl create -f- <<EOT
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
kubectl create -f- <<EOT
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools:
    - cozystack
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses:
    - 192.168.123.200-192.168.123.250
  autoAssign: true
  avoidBuggyIPs: false
EOT

# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m

kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
  "host": "example.org",
  "ingress": true,
  "monitoring": true,
  "etcd": true,
  "isolated": true
}}'

# Wait for the HelmReleases to be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

# Wait for the HelmReleases to be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root

if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
  flux reconcile hr monitoring -n tenant-root --force
  kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi

kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
  "dashboard": true
}}'

# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller

# Wait for etcd
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd

# Wait for VictoriaMetrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

# Wait for Grafana
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment

# Get the IP of nginx-ingress
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')

# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found


# Test OIDC
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
  "oidc-enabled": "true"
}}'

timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator

@@ -16,15 +16,13 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
   exit 0
 fi
 
-miss_map=$(mktemp)
-trap 'rm -f "$miss_map"' EXIT
-echo -n "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file" > $miss_map
+miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
 
 # search accross all tags sorted by version
 search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
 
 resolved_miss_map=$(
-  while read -r chart version commit; do
+  echo "$miss_map" | while read -r chart version commit; do
     # if version is found in HEAD, it's HEAD
     if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
       echo "$chart $version HEAD"
@@ -58,7 +56,7 @@ resolved_miss_map=(
     fi
 
     echo "$chart $version $found_tag"
-  done < $miss_map
+  done
 )
 
-printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '!seen[$1, $2]++' > "$file"
+printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"

@@ -1,65 +0,0 @@
#!/bin/sh

set -e

usage() {
	printf "%s\n" "Usage:" >&2 ;
	printf -- "%s\n" '---' >&2 ;
	printf "%s %s\n" "$0" "INPUT_DIR OUTPUT_DIR TMP_DIR [DEPENDENCY_DIR]" >&2 ;
	printf -- "%s\n" '---' >&2 ;
	printf "%s\n" "Takes a helm repository from INPUT_DIR, with an optional library repository in" >&2 ;
	printf "%s\n" "DEPENDENCY_DIR, prepares a view of the git archive at select points in history" >&2 ;
	printf "%s\n" "in TMP_DIR and packages helm charts, outputting the tarballs to OUTPUT_DIR" >&2 ;
}

if [ "x$(basename $PWD)" != "xpackages" ]
then
	echo "Error: This script must run from the ./packages/ directory" >&2
	echo >&2
	usage
	exit 1
fi

if [ "x$#" != "x3" ] && [ "x$#" != "x4" ]
then
	echo "Error: This script takes 3 or 4 arguments" >&2
	echo "Got $# arguments:" "$@" >&2
	echo >&2
	usage
	exit 1
fi

input_dir=$1
output_dir=$2
tmp_dir=$3

if [ "x$#" = "x4" ]
then
	dependency_dir=$4
fi

rm -rf "${output_dir:?}"
mkdir -p "${output_dir}"
while read package _ commit
do
	# this lets devs build the packages from a dirty repo for quick local testing
	if [ "x$commit" = "xHEAD" ]
	then
		helm package "${input_dir}/${package}" -d "${output_dir}"
		continue
	fi
	git archive --format tar "${commit}" "${input_dir}/${package}" | tar -xf- -C "${tmp_dir}/"

	# the library chart is not present in older commits and git archive doesn't fail gracefully if the path is not found
	if [ "x${dependency_dir}" != "x" ] && git ls-tree --name-only "${commit}" "${dependency_dir}" | grep -qx "${dependency_dir}"
	then
		git archive --format tar "${commit}" "${dependency_dir}" | tar -xf- -C "${tmp_dir}/"
	fi
	helm package "${tmp_dir}/${input_dir}/${package}" -d "${output_dir}"
	rm -rf "${tmp_dir:?}/${input_dir:?}/${package:?}"
	if [ "x${dependency_dir}" != "x" ]
	then
		rm -rf "${tmp_dir:?}/${dependency_dir:?}"
	fi
done < "${input_dir}/versions_map"
helm repo index "${output_dir}"

hack/testdata/http-cache/check.sh (new vendored file, 1 line)
@@ -0,0 +1 @@
return 0

hack/testdata/http-cache/values.yaml (new vendored file, 2 lines)
@@ -0,0 +1,2 @@
endpoints:
  - 8.8.8.8:443

hack/testdata/kubernetes/check.sh (new vendored file, 1 line)
@@ -0,0 +1 @@
return 0

hack/testdata/kubernetes/values.yaml (new vendored file, 62 lines)
@@ -0,0 +1,62 @@
## @section Common parameters

## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
  replicas: 2
storageClass: replicated

## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    instanceType: "u1.medium"
    ephemeralStorage: 20Gi
    roles:
      - ingress-nginx

    resources:
      cpu: ""
      memory: ""

## @section Cluster Addons
##
addons:

  ## Cert-manager: automatically creates and manages SSL/TLS certificates
  ##
  certManager:
    ## @param addons.certManager.enabled Enables cert-manager
    ## @param addons.certManager.valuesOverride Custom values to override
    enabled: true
    valuesOverride: {}

  ## Ingress-NGINX Controller
  ##
  ingressNginx:
    ## @param addons.ingressNginx.enabled Enable the Ingress-NGINX controller (expects nodes with the 'ingress-nginx' role)
    ## @param addons.ingressNginx.valuesOverride Custom values to override
    ##
    enabled: true
    ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by the upper cluster
    ## e.g.:
    ## hosts:
    ## - example.org
    ## - foo.example.net
    ##
    hosts: []
    valuesOverride: {}

  ## Flux CD
  ##
  fluxcd:
    ## @param addons.fluxcd.enabled Enables Flux CD
    ## @param addons.fluxcd.valuesOverride Custom values to override
    ##
    enabled: true
    valuesOverride: {}

hack/testdata/nats/check.sh (new vendored file, 1 line)
@@ -0,0 +1 @@
return 0

hack/testdata/nats/values.yaml (new vendored file, 10 lines)
@@ -0,0 +1,10 @@

## @section Common parameters

## @param external Enable external access from outside the cluster
## @param replicas Number of NATS replicas
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""

hack/testdata/tenant/check.sh (new vendored file, 1 line)
@@ -0,0 +1 @@
return 0

hack/testdata/tenant/values.yaml (new vendored file, 6 lines)
@@ -0,0 +1,6 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true

@@ -1,158 +0,0 @@
package controller

import (
	"context"
	"fmt"
	"strings"
	"time"

	e "errors"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	"gopkg.in/yaml.v2"
	corev1 "k8s.io/api/core/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

type TenantHelmReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	hr := &helmv2.HelmRelease{}
	if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
		if errors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "unable to fetch HelmRelease")
		return ctrl.Result{}, err
	}

	if !strings.HasPrefix(hr.Name, "tenant-") {
		return ctrl.Result{}, nil
	}

	if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
		return ctrl.Result{}, nil
	}

	if len(hr.Status.History) == 0 {
		logger.Info("no history in HelmRelease status", "name", hr.Name)
		return ctrl.Result{}, nil
	}

	if hr.Status.History[0].Status != "deployed" {
		return ctrl.Result{}, nil
	}

	newDigest := hr.Status.History[0].Digest
	var hrList helmv2.HelmReleaseList
	childNamespace := getChildNamespace(hr.Namespace, hr.Name)
	if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
		if hr.Spec.Values == nil {
			logger.Error(e.New("hr.Spec.Values is nil"), "cant annotate tenant-root ns")
			return ctrl.Result{}, nil
		}
		err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
		if err != nil {
			logger.Error(err, "cant annotate tenant-root ns")
			return ctrl.Result{}, nil
		}
		logger.Info("namespace 'tenant-root' annotated")
	}

	if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
		logger.Error(err, "unable to list HelmReleases in namespace", "namespace", hr.Name)
		return ctrl.Result{}, err
	}

	for _, item := range hrList.Items {
		if item.Name == hr.Name {
			continue
		}
		oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
		if oldDigest == newDigest {
			continue
		}
		patchTarget := item.DeepCopy()

		if patchTarget.Annotations == nil {
			patchTarget.Annotations = map[string]string{}
		}
		ts := time.Now().Format(time.RFC3339Nano)

		patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
		patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
		patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts

		patch := client.MergeFrom(item.DeepCopy())
		if err := r.Patch(ctx, patchTarget, patch); err != nil {
			logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
			continue
		}

		logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
	}

	return ctrl.Result{}, nil
}

func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&helmv2.HelmRelease{}).
		Complete(r)
}

func getChildNamespace(currentNamespace, hrName string) string {
	tenantName := strings.TrimPrefix(hrName, "tenant-")

	switch {
	case currentNamespace == "tenant-root" && hrName == "tenant-root":
		// 1) root tenant inside root namespace
		return "tenant-root"

	case currentNamespace == "tenant-root":
		// 2) any other tenant in root namespace
		return fmt.Sprintf("tenant-%s", tenantName)

	default:
		// 3) tenant in a dedicated namespace
		return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
	}
}

func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
	var data map[string]interface{}
	if err := yaml.Unmarshal(values.Raw, &data); err != nil {
		return fmt.Errorf("failed to parse HelmRelease values: %w", err)
	}

	host, ok := data["host"].(string)
	if !ok || host == "" {
		return fmt.Errorf("host field not found or not a string")
	}

	var ns corev1.Namespace
	if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
		return fmt.Errorf("failed to get namespace tenant-root: %w", err)
	}

	if ns.Annotations == nil {
		ns.Annotations = map[string]string{}
	}
	ns.Annotations["namespace.cozystack.io/host"] = host

	if err := c.Update(context.TODO(), &ns); err != nil {
		return fmt.Errorf("failed to update namespace: %w", err)
	}

	return nil
}

@@ -39,15 +39,6 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 	}
 
 	t := getMonitoredObject(w)
-
-	if t == nil {
-		err = r.Delete(ctx, w)
-		if err != nil {
-			logger.Error(err, "failed to delete workload")
-		}
-		return ctrl.Result{}, err
-	}
-
 	err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)
 
 	// found object, nothing to do
@@ -77,23 +68,20 @@ func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
 }
 
 func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
-	switch {
-	case strings.HasPrefix(w.Name, "pvc-"):
+	if strings.HasPrefix(w.Name, "pvc-") {
 		obj := &corev1.PersistentVolumeClaim{}
 		obj.Name = strings.TrimPrefix(w.Name, "pvc-")
 		obj.Namespace = w.Namespace
 		return obj
-	case strings.HasPrefix(w.Name, "svc-"):
+	}
+	if strings.HasPrefix(w.Name, "svc-") {
 		obj := &corev1.Service{}
 		obj.Name = strings.TrimPrefix(w.Name, "svc-")
 		obj.Namespace = w.Namespace
 		return obj
-	case strings.HasPrefix(w.Name, "pod-"):
-		obj := &corev1.Pod{}
-		obj.Name = strings.TrimPrefix(w.Name, "pod-")
-		obj.Namespace = w.Namespace
-		return obj
 	}
-	var obj client.Object
+	obj := &corev1.Pod{}
+	obj.Name = w.Name
+	obj.Namespace = w.Namespace
 	return obj
 }

@@ -1,26 +0,0 @@
package controller

import (
	"testing"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
	w := &cozyv1alpha1.Workload{}
	w.Name = "unprefixed-name"
	obj := getMonitoredObject(w)
	if obj != nil {
		t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
	}
}

func TestPodMonitoredObject(t *testing.T) {
	w := &cozyv1alpha1.Workload{}
	w.Name = "pod-mypod"
	obj := getMonitoredObject(w)
	if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
		t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
	}
}

@@ -212,12 +212,15 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
 ) error {
 	logger := log.FromContext(ctx)
 
-	// totalResources will store the sum of all container resource requests
+	// Combine both init containers and normal containers to sum resources properly
+	combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
+
+	// totalResources will store the sum of all container resource limits
 	totalResources := make(map[string]resource.Quantity)
 
-	// Iterate over all containers to aggregate their requests
-	for _, container := range pod.Spec.Containers {
-		for name, qty := range container.Resources.Requests {
+	// Iterate over all containers to aggregate their Limits
+	for _, container := range combinedContainers {
+		for name, qty := range container.Resources.Limits {
 			if existing, exists := totalResources[name.String()]; exists {
 				existing.Add(qty)
 				totalResources[name.String()] = existing
@@ -246,7 +249,7 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
 
 	workload := &cozyv1alpha1.Workload{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      fmt.Sprintf("pod-%s", pod.Name),
+			Name:      pod.Name,
 			Namespace: pod.Namespace,
 		},
 	}

@@ -1,8 +1,14 @@
-OUT=../_out/repos/apps
-TMP := $(shell mktemp -d)
+OUT=../../_out/repos/apps
+TMP=../../_out/repos/apps/historical
 
 repo:
-	cd .. && ../hack/package_chart.sh apps $(OUT) $(TMP) library
+	rm -rf "$(OUT)"
+	mkdir -p "$(OUT)"
+	awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
+	awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
+	helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
+	cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/apps
+	rm -rf "$(TMP)"
 
 fix-chartnames:
 	find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
+version: 0.2.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -1,4 +1,4 @@
 include ../../../scripts/package.mk
 
 generate:
-	readme-generator -v values.yaml -s values.schema.json -r README.md
+	#readme-generator -v values.yaml -s values.schema.json -r README.md

@@ -1,3 +0,0 @@
# S3 bucket

## Parameters

@@ -11,8 +11,14 @@ spec:
       kind: HelmRepository
       name: cozystack-system
       namespace: cozy-system
-    version: '>= 0.0.0-0'
+    version: '*'
   interval: 1m0s
   timeout: 5m0s
   values:
     bucketName: {{ .Release.Name }}
+  valuesFrom:
+    - kind: ConfigMap
+      name: cozy-tenant-configuration-hash
+      optional: true
+      targetPath: cozyTenantConfigurationHash
+      valuesKey: cozyTenantConfigurationHash

@@ -1,5 +0,0 @@
{
  "title": "Chart Values",
  "type": "object",
  "properties": {}
}
@@ -1 +0,0 @@
{}

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.0
+version: 0.7.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -7,10 +7,8 @@ generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md
 
 image:
-	docker buildx build images/clickhouse-backup \
+	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/clickhouse-backup \
 		--provenance false \
-		--builder=$(BUILDER) \
-		--platform=$(PLATFORM) \
 		--tag $(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/clickhouse-backup:latest \
 		--cache-to type=inline \

@@ -1 +0,0 @@
../../../library/cozy-lib
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/clickhouse-backup:0.9.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
   )
   "2xlarge" (dict
-    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}

@@ -122,9 +122,9 @@ spec:
         - name: clickhouse
           image: clickhouse/clickhouse-server:24.9.2.42
           {{- if .Values.resources }}
-          resources: {{- include "cozy-lib.resources.sanitize" .Values.resources | nindent 16 }}
+          resources: {{- toYaml .Values.resources | nindent 16 }}
           {{- else if ne .Values.resourcesPreset "none" }}
-          resources: {{- include "cozy-lib.resources.preset" .Values.resourcesPreset | nindent 16 }}
+          resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
           {{- end }}
           volumeMounts:
             - name: data-volume-template

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.5.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.11.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
   )
   "2xlarge" (dict
-    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.0
+version: 0.4.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -6,10 +6,8 @@ include ../../../scripts/package.mk
 image: image-nginx
 
 image-nginx:
-	docker buildx build images/nginx-cache \
+	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/nginx-cache \
 		--provenance false \
-		--builder=$(BUILDER) \
-		--platform=$(PLATFORM) \
 		--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:latest \
 		--cache-to type=inline \

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.5.0@sha256:158c35dd6a512bd14e86a423be5c8c7ca91ac71999c73cce2714e4db60a2db43
+ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b

@@ -1,4 +1,4 @@
-FROM ubuntu:22.04 AS stage
+FROM ubuntu:22.04 as stage
 
 ARG NGINX_VERSION=1.25.3
 ARG IP2LOCATION_C_VERSION=8.6.1
@@ -9,15 +9,11 @@ ARG FIFTYONEDEGREES_NGINX_VERSION=3.2.21.1
 ARG NGINX_CACHE_PURGE_VERSION=2.5.3
 ARG NGINX_VTS_VERSION=0.2.2
 
-ARG TARGETOS
-ARG TARGETARCH
-
 # Install required packages for development
-RUN apt update -q \
-    && apt install -yq --no-install-recommends \
-    ca-certificates \
+RUN apt-get update -q \
+    && apt-get install -yq \
     unzip \
     automake \
     autoconf \
     build-essential \
     libtool \
     libpcre3 \
@@ -72,7 +68,7 @@ RUN checkinstall \
     --default \
     --pkgname=ip2location-c \
     --pkgversion=${IP2LOCATION_C_VERSION} \
-    --pkgarch=${TARGETARCH} \
+    --pkgarch=amd64 \
     --pkggroup=lib \
     --pkgsource="https://github.com/chrislim2888/IP2Location-C-Library" \
    --maintainer="Eduard Generalov <eduard@generalov.net>" \
@@ -101,7 +97,7 @@ RUN checkinstall \
     --default \
     --pkgname=ip2proxy-c \
     --pkgversion=${IP2PROXY_C_VERSION} \
-    --pkgarch=${TARGETARCH} \
+    --pkgarch=amd64 \
     --pkggroup=lib \
     --pkgsource="https://github.com/ip2location/ip2proxy-c" \
     --maintainer="Eduard Generalov <eduard@generalov.net>" \
@@ -148,7 +144,7 @@ RUN checkinstall \
     --default \
     --pkgname=nginx \
     --pkgversion=$VERS \
-    --pkgarch=${TARGETARCH} \
+    --pkgarch=amd64 \
     --pkggroup=web \
     --provides=nginx \
     --requires=ip2location-c,ip2proxy-c,libssl3,libc-bin,libc6,libzstd1,libpcre++0v5,libpcre16-3,libpcre2-8-0,libpcre3,libpcre32-3,libpcrecpp0v5,libmaxminddb0 \
@@ -169,9 +165,10 @@ COPY nginx-reloader.sh /usr/bin/nginx-reloader.sh
 RUN set -x \
     && groupadd --system --gid 101 nginx \
     && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
-    && apt update -q \
-    && apt install -yq --no-install-recommends --no-install-suggests gnupg1 ca-certificates inotify-tools \
-    && apt install -y /packages/*.deb \
+    && apt update \
+    && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates inotify-tools \
+    && apt -y install /packages/*.deb \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* \
     && mkdir -p /var/lib/nginx /var/log/nginx \
     && ln -sf /dev/stdout /var/log/nginx/access.log \
@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
|
||||
{{ include "resources.preset" (dict "type" "nano") -}}
|
||||
*/}}
|
||||
{{- define "resources.preset" -}}
|
||||
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
|
||||
{{- $presets := dict
|
||||
"nano" (dict
|
||||
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
|
||||
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"micro" (dict
|
||||
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
|
||||
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"small" (dict
|
||||
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
|
||||
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"medium" (dict
|
||||
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
|
||||
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"large" (dict
|
||||
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
|
||||
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"xlarge" (dict
|
||||
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
|
||||
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"2xlarge" (dict
|
||||
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
|
||||
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
}}
|
||||
{{- if hasKey $presets .type -}}
|
||||
|
||||
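For reference, a sketch (not part of the patch) of what the updated helper renders for the `small` preset under the rule stated in the template comment, where CPU and memory limits are the requests increased by 50% and ephemeral-storage keeps a flat 2Gi limit:

```yaml
# include "resources.preset" (dict "type" "small") -- assumed rendering shape
requests:
  cpu: 500m
  memory: 512Mi
  ephemeral-storage: 50Mi
limits:
  cpu: 750m       # 500m * 1.5
  memory: 768Mi   # 512Mi * 1.5
  ephemeral-storage: 2Gi
```
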
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.0
version: 0.5.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}

@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.20.1
version: 0.18.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 1.32.4
appVersion: "1.30.1"

@@ -1,4 +1,4 @@
KUBERNETES_VERSION = v1.32
UBUNTU_CONTAINER_DISK_TAG = v1.30.1
KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)

include ../../../scripts/common-envs.mk
@@ -6,36 +6,27 @@ include ../../../scripts/package.mk

generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json

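The `yq` calls in the `generate` target above pin the allowed preset names into the generated schema; an abridged sketch of the resulting `values.schema.json` fragment (JSON shown in YAML form):

```yaml
properties:
  controlPlane:
    properties:
      apiServer:
        properties:
          resourcesPreset:
            enum: ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]
```
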
image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler

image-ubuntu-container-disk:
docker buildx build images/ubuntu-container-disk \
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
--provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
--cache-to type=inline \
--metadata-file images/ubuntu-container-disk.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
> images/ubuntu-container-disk.tag
rm -f images/ubuntu-container-disk.json

image-kubevirt-cloud-provider:
docker buildx build images/kubevirt-cloud-provider \
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/kubevirt-cloud-provider \
--provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)) \
--tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/kubevirt-cloud-provider:latest \
@@ -49,10 +40,8 @@ image-kubevirt-cloud-provider:
rm -f images/kubevirt-cloud-provider.json

image-kubevirt-csi-driver:
docker buildx build images/kubevirt-csi-driver \
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/kubevirt-csi-driver \
--provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)) \
--tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/kubevirt-csi-driver:latest \
@@ -67,10 +56,8 @@ image-kubevirt-csi-driver:


image-cluster-autoscaler:
docker buildx build images/cluster-autoscaler \
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/cluster-autoscaler \
--provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)) \
--tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/cluster-autoscaler:latest \

@@ -27,48 +27,20 @@ How to access to deployed cluster:
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```

## Parameters
# Series

### Common parameters
<!-- source: https://github.com/kubevirt/common-instancetypes/blob/main/README.md -->

| Name | Description | Value |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
| `storageClass` | StorageClass used to store user data | `replicated` |
| `nodeGroups` | nodeGroups configuration | `{}` |

### Cluster Addons

| Name | Description | Value |
| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `addons.certManager.enabled` | Enables the cert-manager | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.gatewayAPI.enabled` | Enables the Gateway API | `false` |
| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
| `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |

### Kubernetes control plane configuration

| Name | Description | Value |
| -------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `controlPlane.apiServer.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resources` | Resources | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.konnectivity.server.resources` | Resources | `{}` |
. | U | O | CX | M | RT
----------------------------|-----|-----|------|-----|------
*Has GPUs* | | | | |
*Hugepages* | | | ✓ | ✓ | ✓
*Overcommitted Memory* | | ✓ | | |
*Dedicated CPU* | | | ✓ | | ✓
*Burstable CPU performance* | ✓ | ✓ | | ✓ |
*Isolated emulator threads* | | | ✓ | | ✓
*vNUMA* | | | ✓ | | ✓
*vCPU-To-Memory Ratio* | 1:4 | 1:4 | 1:2 | 1:8 | 1:4


## U Series

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.20.1@sha256:720148128917fa10f860a8b7e74f9428de72481c466c880c5ad894e1f0026d43
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3

@@ -1,14 +1,7 @@
# Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64
ARG builder_image=docker.io/library/golang:1.23.4
ARG BASEIMAGE=gcr.io/distroless/static:nonroot-${TARGETARCH}

ARG BASEIMAGE=gcr.io/distroless/static:nonroot-amd64
FROM ${builder_image} AS builder

ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH

RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \
&& cd /src/autoscaler/cluster-autoscaler \
&& git checkout cluster-autoscaler-1.32.0
@@ -21,8 +14,6 @@ RUN make build
FROM $BASEIMAGE
LABEL maintainer="Marcin Wielgus <mwielgus@google.com>"

ARG TARGETARCH

COPY --from=builder /src/autoscaler/cluster-autoscaler/cluster-autoscaler-${TARGETARCH} /cluster-autoscaler
COPY --from=builder /src/autoscaler/cluster-autoscaler/cluster-autoscaler-amd64 /cluster-autoscaler
WORKDIR /
CMD ["/cluster-autoscaler"]

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.20.1@sha256:1b48a4725a33ccb48604bb2e1be3171271e7daac2726d3119228212d8a9da5bb
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9

@@ -1,10 +1,5 @@
# Source: https://github.com/kubevirt/cloud-provider-kubevirt/blob/main/build/images/kubevirt-cloud-controller-manager/Dockerfile
FROM golang:1.20.6 AS builder

ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH
FROM --platform=linux/amd64 golang:1.20.6 AS builder

RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
&& cd /go/src/kubevirt.io/cloud-provider-kubevirt \
@@ -19,7 +14,7 @@ RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'
RUN go mod tidy
RUN go mod vendor

RUN CGO_ENABLED=0 go build -mod=vendor -ldflags="-s -w" -o bin/kubevirt-cloud-controller-manager ./cmd/kubevirt-cloud-controller-manager
RUN CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags="-s -w" -o bin/kubevirt-cloud-controller-manager ./cmd/kubevirt-cloud-controller-manager

FROM registry.access.redhat.com/ubi9/ubi-micro
COPY --from=builder /go/src/kubevirt.io/cloud-provider-kubevirt/bin/kubevirt-cloud-controller-manager /bin/kubevirt-cloud-controller-manager

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.20.1@sha256:fb6d3ce9d6d948285a6d399c852e15259d6922162ec7c44177d2274243f59d1f
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20

@@ -5,11 +5,6 @@ RUN git clone https://github.com/kubevirt/csi-driver /src/kubevirt-csi-driver \
&& cd /src/kubevirt-csi-driver \
&& git checkout 35836e0c8b68d9916d29a838ea60cdd3fc6199cf

ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH

WORKDIR /src/kubevirt-csi-driver
RUN make build


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:184b81529ae72684279799b12f436cc7a511d8ff5bd1e9a30478799c7707c625
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a

@@ -1,26 +1,19 @@
# TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04
FROM ubuntu:22.04 AS guestfish
FROM ubuntu:22.04 as guestfish

ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
&& apt-get -y install \
libguestfs-tools \
linux-image-generic \
wget \
make \
bash-completion
bash-completion \
&& apt-get clean

WORKDIR /build

FROM guestfish AS builder
FROM guestfish as builder

ARG TARGETOS
ARG TARGETARCH

# noble is a code name for the Ubuntu 24.04 LTS release
RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-${TARGETARCH}.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null

ARG KUBERNETES_VERSION
RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img

RUN qemu-img resize image.img 5G \
&& eval "$(guestfish --listen --network)" \
@@ -31,21 +24,19 @@ RUN qemu-img resize image.img 5G \
&& guestfish --remote command "resize2fs /dev/sda1" \
# docker repo
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
&& guestfish --remote sh 'echo "deb [signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
# kubernetes repo
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
&& guestfish --remote command "apt-get check -q" \
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
# install containerd
&& guestfish --remote command "apt-get update -q" \
&& guestfish --remote command "apt-get install -yq containerd.io" \
&& guestfish --remote command "apt-get update -y" \
&& guestfish --remote command "apt-get install -y containerd.io" \
# configure containerd
&& guestfish --remote command "mkdir -p /etc/containerd" \
&& guestfish --remote sh "containerd config default | tee /etc/containerd/config.toml" \
&& guestfish --remote command "sed -i '/SystemdCgroup/ s/=.*/= true/' /etc/containerd/config.toml" \
&& guestfish --remote command "containerd config dump >/dev/null" \
# install kubernetes
&& guestfish --remote command "apt-get install -yq kubelet kubeadm" \
&& guestfish --remote command "apt-get install -y kubelet kubeadm" \
# clean apt cache
&& guestfish --remote sh 'apt-get clean && rm -rf /var/lib/apt/lists/*' \
# write system configuration

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}

@@ -39,13 +39,6 @@ spec:
sockets: 1
{{- end }}
devices:
{{- if .group.gpus }}
gpus:
{{- range $i, $gpu := .group.gpus }}
- name: gpu{{ add $i 1 }}
deviceName: {{ $gpu.name }}
{{- end }}
{{- end }}
disks:
- name: system
disk:
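As an illustration (hypothetical values, matching the GPU example given later in the chart's values.yaml), a node group with one GPU would make the `range` above render:

```yaml
devices:
  gpus:
    - name: gpu1                           # index 0 + 1
      deviceName: nvidia.com/AD102GL_L40S  # from .group.gpus[0].name
  disks:
    - name: system
```
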
@@ -110,22 +103,22 @@ metadata:
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
spec:
apiServer:
{{- if .Values.controlPlane.apiServer.resources }}
resources: {{- toYaml .Values.controlPlane.apiServer.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
{{- if .Values.kamajiControlPlane.apiServer.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.apiServer.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.apiServer.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
controllerManager:
{{- if .Values.controlPlane.controllerManager.resources }}
resources: {{- toYaml .Values.controlPlane.controllerManager.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
{{- if .Values.kamajiControlPlane.controllerManager.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.controllerManager.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.controllerManager.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
scheduler:
{{- if .Values.controlPlane.scheduler.resources }}
resources: {{- toYaml .Values.controlPlane.scheduler.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
{{- if .Values.kamajiControlPlane.scheduler.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.scheduler.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.scheduler.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
dataStoreName: "{{ $etcd }}"
addons:
@@ -135,10 +128,10 @@ spec:
konnectivity:
server:
port: 8132
{{- if .Values.controlPlane.konnectivity.server.resources }}
resources: {{- toYaml .Values.controlPlane.konnectivity.server.resources | nindent 10 }}
{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
{{- if .Values.kamajiControlPlane.addons.konnectivity.server.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.addons.konnectivity.server.resources | nindent 10 }}
{{- else if ne .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
{{- end }}
kubelet:
cgroupfs: systemd
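A values sketch showing the two branches of the resource blocks above: an explicit `resources` object wins, otherwise the named preset applies unless it is `none`:

```yaml
controlPlane:
  apiServer:
    resourcesPreset: small  # used only while resources is empty
    resources: {}           # set this to bypass the preset, e.g.:
    # resources:
    #   requests:
    #     cpu: 100m
    #     memory: 512Mi
```
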
@@ -150,14 +143,14 @@ spec:
ingress:
extraAnnotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}
className: "{{ $ingress }}"
deployment:
podAdditionalMetadata:
labels:
policy.cozystack.io/allow-to-etcd: "true"
replicas: 2
version: {{ $.Chart.AppVersion }}
version: 1.30.1
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
@@ -283,7 +276,7 @@ spec:
kind: KubevirtMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
namespace: {{ $.Release.Namespace }}
version: v{{ $.Chart.AppVersion }}
version: v1.30.1
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck

@@ -4,7 +4,7 @@ metadata:
name: {{ .Release.Name }}-cert-manager-crds
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cert-manager-crds
@@ -16,7 +16,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-cert-manager
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cert-manager
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -31,9 +30,11 @@ spec:
upgrade:
remediation:
retries: -1
{{- with .Values.addons.certManager.valuesOverride }}
values:
{{- toYaml . | nindent 4 }}
{{- if .Values.addons.certManager.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-cert-manager-values-override
valuesKey: values
{{- end }}

dependsOn:
@@ -46,3 +47,13 @@ spec:
- name: {{ .Release.Name }}-cert-manager-crds
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if .Values.addons.certManager.valuesOverride }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-cert-manager-values-override
stringData:
values: |
{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
{{- end }}

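With `addons.certManager.valuesOverride` set, the hunk above moves the override payload out of inline `values` and into a Secret wired in through `valuesFrom`; a sketch of the rendered objects for a hypothetical release named `demo`:

```yaml
valuesFrom:
  - kind: Secret
    name: demo-cert-manager-values-override
    valuesKey: values
---
apiVersion: v1
kind: Secret
metadata:
  name: demo-cert-manager-values-override
stringData:
  values: |
    installCRDs: false  # hypothetical override payload
```
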
@@ -1,25 +1,10 @@
{{- define "cozystack.defaultCiliumValues" -}}
cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
{{- if $.Values.addons.gatewayAPI.enabled }}
gatewayAPI:
enabled: true
envoy:
enabled: true
{{- end }}
{{- end }}

apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-cilium
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cilium
@@ -31,7 +16,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -46,13 +30,14 @@ spec:
remediation:
retries: -1
values:
{{- toYaml (deepCopy .Values.addons.cilium.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultCiliumValues" .))) | nindent 4 }}
cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if $.Values.addons.gatewayAPI.enabled }}
- name: {{ .Release.Name }}-gateway-api-crds
namespace: {{ .Release.Namespace }}
{{- end }}

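The `deepCopy ... | mergeOverwrite ...` expression above layers user overrides on top of `cozystack.defaultCiliumValues`; a sketch of the effective values given a hypothetical override of `routingMode`:

```yaml
# addons.cilium.valuesOverride (hypothetical):
#   cilium:
#     routingMode: native
# merged result passed to the HelmRelease:
cilium:
  k8sServiceHost: demo.tenant-example.svc  # hypothetical release and namespace
  k8sServicePort: 6443
  routingMode: native                      # override wins over the default "tunnel"
  enableIPv4Masquerade: true
  ipv4NativeRoutingCIDR: ""
```
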
@@ -4,7 +4,7 @@ metadata:
name: {{ .Release.Name }}-csi
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: csi
@@ -16,7 +16,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

@@ -20,7 +20,7 @@ spec:
effect: "NoSchedule"
containers:
- name: kubectl
image: docker.io/clastix/kubectl:v1.32
image: docker.io/clastix/kubectl:v1.30.1
command:
- /bin/sh
- -c
@@ -30,7 +30,6 @@ spec:
patch
helmrelease
{{ .Release.Name }}-cilium
{{ .Release.Name }}-gateway-api-crds
{{ .Release.Name }}-csi
{{ .Release.Name }}-cert-manager
{{ .Release.Name }}-cert-manager-crds
@@ -39,7 +38,6 @@ spec:
{{ .Release.Name }}-ingress-nginx
{{ .Release.Name }}-fluxcd-operator
{{ .Release.Name }}-fluxcd
{{ .Release.Name }}-gpu-operator
-p '{"spec": {"suspend": true}}'
--type=merge --field-manager=flux-client-side-apply || true
---
@@ -78,7 +76,6 @@ rules:
- {{ .Release.Name }}-ingress-nginx
- {{ .Release.Name }}-fluxcd-operator
- {{ .Release.Name }}-fluxcd
- {{ .Release.Name }}-gpu-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding

@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-fluxcd-operator
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: fluxcd-operator
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -50,7 +49,7 @@ metadata:
name: {{ .Release.Name }}-fluxcd
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: fluxcd
@@ -62,7 +61,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
@@ -75,9 +73,11 @@ spec:
upgrade:
remediation:
retries: -1
{{- with .Values.addons.fluxcd.valuesOverride }}
values:
{{- toYaml . | nindent 4 }}
{{- if .Values.addons.fluxcd.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-fluxcd-values-override
valuesKey: values
{{- end }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
@@ -89,3 +89,14 @@ spec:
- name: {{ .Release.Name }}-fluxcd-operator
namespace: {{ .Release.Namespace }}
{{- end }}

{{- if .Values.addons.fluxcd.valuesOverride }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-fluxcd-values-override
stringData:
values: |
{{- toYaml .Values.addons.fluxcd.valuesOverride | nindent 4 }}
{{- end }}

@@ -1,38 +0,0 @@
{{- if $.Values.addons.gatewayAPI.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-gateway-api-crds
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: gateway-api-crds
chart:
spec:
chart: cozy-gateway-api-crds
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
targetNamespace: kube-system
storageNamespace: kube-system
install:
createNamespace: false
remediation:
retries: -1
upgrade:
remediation:
retries: -1
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
@@ -1,46 +0,0 @@
{{- if .Values.addons.gpuOperator.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-gpu-operator
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: gpu-operator
chart:
spec:
chart: cozy-gpu-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
targetNamespace: cozy-gpu-operator
storageNamespace: cozy-gpu-operator
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
{{- with .Values.addons.gpuOperator.valuesOverride }}
values:
{{- toYaml . | nindent 4 }}
{{- end }}

dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,20 +1,3 @@
{{- define "cozystack.defaultIngressValues" -}}
ingress-nginx:
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
hostNetwork: true
service:
enabled: false
{{- if not .Values.addons.certManager.enabled }}
admissionWebhooks:
certManager:
enabled: false
{{- end }}
nodeSelector:
node-role.kubernetes.io/ingress-nginx: ""
{{- end }}

{{- if .Values.addons.ingressNginx.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
@@ -22,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-ingress-nginx
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: ingress-nginx
@@ -34,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -49,7 +31,21 @@ spec:
remediation:
retries: -1
values:
{{- toYaml (deepCopy .Values.addons.ingressNginx.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultIngressValues" .))) | nindent 4 }}
ingress-nginx:
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
hostNetwork: true
service:
enabled: false
nodeSelector:
node-role.kubernetes.io/ingress-nginx: ""
{{- if .Values.addons.ingressNginx.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-ingress-nginx-values-override
valuesKey: values
{{- end }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
@@ -58,3 +54,14 @@ spec:
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
{{- end }}

{{- if .Values.addons.ingressNginx.valuesOverride }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-ingress-nginx-values-override
stringData:
values: |
{{- toYaml .Values.addons.ingressNginx.valuesOverride | nindent 4 }}
{{- end }}

@@ -7,7 +7,7 @@ metadata:
name: {{ .Release.Name }}-monitoring-agents
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cozy-monitoring-agents
@@ -19,7 +19,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-vertical-pod-autoscaler-crds
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: vertical-pod-autoscaler-crds
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

@@ -1,28 +1,5 @@
{{- define "cozystack.defaultVPAValues" -}}
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
vertical-pod-autoscaler:
recommender:
extraArgs:
container-name-label: container
container-namespace-label: namespace
container-pod-name-label: pod
storage: prometheus
memory-saver: true
pod-label-prefix: label_
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
pod-name-label: pod
pod-namespace-label: namespace
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
prometheus-cadvisor-job-name: cadvisor
resources:
limits:
memory: 1600Mi
requests:
cpu: 100m
memory: 1600Mi
{{- end }}

{{- if .Values.addons.monitoringAgents.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
@@ -30,7 +7,7 @@ metadata:
name: {{ .Release.Name }}-vertical-pod-autoscaler
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: vertical-pod-autoscaler
@@ -42,7 +19,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -57,7 +33,32 @@ spec:
remediation:
retries: -1
values:
{{- toYaml (deepCopy .Values.addons.verticalPodAutoscaler.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultVPAValues" .))) | nindent 4 }}
vertical-pod-autoscaler:
recommender:
extraArgs:
container-name-label: container
container-namespace-label: namespace
container-pod-name-label: pod
storage: prometheus
memory-saver: true
pod-label-prefix: label_
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
pod-name-label: pod
pod-namespace-label: namespace
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
prometheus-cadvisor-job-name: cadvisor
resources:
limits:
memory: 1600Mi
requests:
cpu: 100m
memory: 1600Mi
{{- if .Values.addons.verticalPodAutoscaler.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-vertical-pod-autoscaler-values-override
valuesKey: values
{{- end }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}

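For context, `cozystack.defaultVPAValues` above resolves the metrics endpoint from an annotation on the release namespace via `lookup`; a sketch with a hypothetical tenant:

```yaml
# Namespace as seen by lookup (hypothetical names):
apiVersion: v1
kind: Namespace
metadata:
  name: tenant-example
  annotations:
    namespace.cozystack.io/monitoring: tenant-root
# resulting recommender argument:
# prometheus-address: http://vmselect-shortterm.tenant-root.svc.cozy.local:8481/select/0/prometheus/
```
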
@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-cozy-victoria-metrics-operator
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cozy-victoria-metrics-operator
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

@@ -1,247 +1,97 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"host": {
"type": "string",
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
"default": ""
},
"controlPlane": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of replicas for Kubernetes control-plane components",
"default": 2
"title": "Chart Values",
"type": "object",
"properties": {
"host": {
"type": "string",
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
"default": ""
},
"apiServer": {
"type": "object",
"properties": {
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "small",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
}
}
},
"controllerManager": {
"type": "object",
"properties": {
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
"scheduler": {
"type": "object",
"properties": {
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
}
}
},
"konnectivity": {
"type": "object",
"properties": {
"server": {
"type": "object",
"properties": {
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
"controlPlane": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of replicas for Kubernetes contorl-plane components",
"default": 2
}
}
},
"storageClass": {
"type": "string",
"description": "StorageClass used to store user data",
"default": "replicated"
},
"addons": {
"type": "object",
"properties": {
"certManager": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables the cert-manager",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"ingressNginx": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
},
"hosts": {
"type": "array",
"description": "List of domain names that should be passed through to the cluster by upper cluster",
"default": [],
"items": {}
}
}
},
"fluxcd": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables Flux CD",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"monitoringAgents": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
}
}
}
}
}
}
},
"storageClass": {
"type": "string",
"description": "StorageClass used to store user data",
"default": "replicated"
},
"addons": {
"type": "object",
"properties": {
"certManager": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables the cert-manager",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"cilium": {
"type": "object",
"properties": {
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"gatewayAPI": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables the Gateway API",
"default": false
}
}
},
"ingressNginx": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
},
"hosts": {
"type": "array",
"description": "List of domain names that should be passed through to the cluster by upper cluster",
"default": [],
"items": {}
}
}
},
"gpuOperator": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables the gpu-operator",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"fluxcd": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables Flux CD",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"monitoringAgents": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"verticalPodAutoscaler": {
"type": "object",
"properties": {
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
}
}
}
}
}

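A minimal values snippet (illustrative only) that the trimmed schema above still accepts:

```yaml
host: ""
controlPlane:
  replicas: 2
storageClass: replicated
addons:
  certManager:
    enabled: true
    valuesOverride: {}
```
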
@@ -1,10 +1,12 @@
## @section Common parameters

## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param controlPlane.replicas Number of replicas for Kubernetes contorl-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
replicas: 2
storageClass: replicated

## @param nodeGroups [object] nodeGroups configuration
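These `## @section` and `## @param` comments are what `readme-generator` (invoked from the Makefile's `generate` target earlier in this diff) reads to rebuild the README tables and `values.schema.json`; a sketch of the mapping for one annotated line:

```yaml
# values.yaml annotation:
#   ## @param storageClass StorageClass used to store user data
#   storageClass: replicated
# becomes, in the generated schema (JSON shown as YAML):
storageClass:
  type: string
  description: StorageClass used to store user data
  default: replicated
```
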
@@ -22,14 +24,6 @@ nodeGroups:
      cpu: ""
      memory: ""

    ## List of GPUs to attach (WARN: NVIDIA driver requires at least 4 GiB of RAM)
    ## e.g:
    ## instanceType: "u1.xlarge"
    ## gpus:
    ## - name: nvidia.com/AD102GL_L40S
    gpus: []


## @section Cluster Addons
##
addons:

@@ -42,18 +36,6 @@ addons:
    enabled: false
    valuesOverride: {}

  ## Cilium CNI plugin
  ##
  cilium:
    ## @param addons.cilium.valuesOverride Custom values to override
    valuesOverride: {}

  ## Gateway API
  ##
  gatewayAPI:
    ## @param addons.gatewayAPI.enabled Enables the Gateway API
    enabled: false

  ## Ingress-NGINX Controller
  ##
  ingressNginx:

@@ -70,14 +52,6 @@ addons:
    hosts: []
    valuesOverride: {}

  ## GPU-operator: NVIDIA GPU Operator
  ##
  gpuOperator:
    ## @param addons.gpuOperator.enabled Enables the gpu-operator
    ## @param addons.gpuOperator.valuesOverride Custom values to override
    enabled: false
    valuesOverride: {}

  ## Flux CD
  ##
  fluxcd:

@@ -103,42 +77,62 @@ addons:
    ##
    valuesOverride: {}

## @section Kubernetes control plane configuration
## @section Kamaji control plane
##

controlPlane:
  replicas: 2

kamajiControlPlane:
  apiServer:
    ## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
    ## @param controlPlane.apiServer.resources Resources
    ## e.g:
    ## resources:
    ##   limits:
    ##     cpu: 4000m
    ##     memory: 4Gi
    ##   requests:
    ##     cpu: 100m
    ##     memory: 512Mi
    ##
    resourcesPreset: "small"
    ## @param kamajiControlPlane.apiServer.resources Resources
    resources: {}
    # resources:
    #   limits:
    #     cpu: 4000m
    #     memory: 4Gi
    #   requests:
    #     cpu: 100m
    #     memory: 512Mi

    ## @param kamajiControlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
    resourcesPreset: "small"

  controllerManager:
    ## @param controlPlane.controllerManager.resources Resources
    ## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
    resourcesPreset: "micro"
    ## @param kamajiControlPlane.controllerManager.resources Resources
    resources: {}

    # resources:
    #   limits:
    #     cpu: 4000m
    #     memory: 4Gi
    #   requests:
    #     cpu: 100m
    #     memory: 512Mi

    ## @param kamajiControlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
    resourcesPreset: "micro"
  scheduler:
    ## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
    ## @param controlPlane.scheduler.resources Resources
    resourcesPreset: "micro"
    ## @param kamajiControlPlane.scheduler.resources Resources
    resources: {}

  konnectivity:
    server:
      ## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
      ## @param controlPlane.konnectivity.server.resources Resources
      resourcesPreset: "micro"
      resources: {}
    # resources:
    #   limits:
    #     cpu: 4000m
    #     memory: 4Gi
    #   requests:
    #     cpu: 100m
    #     memory: 512Mi

    ## @param kamajiControlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
    resourcesPreset: "micro"
  addons:
    konnectivity:
      server:
        ## @param kamajiControlPlane.addons.konnectivity.server.resources Resources
        resources: {}
        # resources:
        #   limits:
        #     cpu: 4000m
        #     memory: 4Gi
        #   requests:
        #     cpu: 100m
        #     memory: 512Mi

        ## @param kamajiControlPlane.addons.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
        resourcesPreset: "micro"

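Since `resourcesPreset` is ignored as soon as `resources` is non-empty, a production override would look roughly like this (a sketch reusing the example values from the comments above):

```yaml
kamajiControlPlane:
  apiServer:
    resourcesPreset: "small"   # ignored once resources is set
    resources:
      requests:
        cpu: 100m
        memory: 512Mi
      limits:
        cpu: 4000m
        memory: 4Gi
```
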
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.6.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -7,10 +7,8 @@ generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md

image:
	docker buildx build images/mariadb-backup \
	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/mariadb-backup \
		--provenance false \
		--builder=$(BUILDER) \
		--platform=$(PLATFORM) \
		--tag $(REGISTRY)/mariadb-backup:$(call settag,$(MARIADB_BACKUP_TAG)) \
		--cache-from type=registry,ref=$(REGISTRY)/mariadb-backup:latest \
		--cache-to type=inline \

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/mariadb-backup:0.7.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
ghcr.io/cozystack/cozystack/mariadb-backup:0.6.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}

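As a sanity check of the new table, where limits are the requests scaled by 1.5 for sizes below xlarge, `include "resources.preset" (dict "type" "small")` should now resolve to values equivalent to:

```yaml
requests:
  cpu: 500m
  memory: 512Mi
  ephemeral-storage: 50Mi
limits:
  cpu: 750m
  memory: 768Mi
  ephemeral-storage: 2Gi
```
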
@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}

@@ -33,7 +33,7 @@ spec:
        kind: HelmRepository
        name: cozystack-system
        namespace: cozy-system
      version: '>= 0.0.0-0'
      version: '*'
  interval: 1m0s
  timeout: 5m0s
  values:

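Note on the range change: in the semver ranges used by Flux, `>= 0.0.0-0` matches any chart version including prereleases, while `*` matches any release but skips prerelease builds. A sketch of the difference:

```yaml
version: '>= 0.0.0-0'   # matches 1.2.3 and 1.2.3-rc.1
version: '*'            # matches 1.2.3 only
```
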
@@ -95,3 +95,9 @@ spec:
        spec:
          type: LoadBalancer
    {{- end }}
  valuesFrom:
  - kind: ConfigMap
    name: cozy-tenant-configuration-hash
    optional: true
    targetPath: cozyTenantConfigurationHash
    valuesKey: cozyTenantConfigurationHash

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0
version: 0.10.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -7,10 +7,8 @@ generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md

image:
	docker buildx build images/postgres-backup \
	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/postgres-backup \
		--provenance false \
		--builder=$(BUILDER) \
		--platform=$(PLATFORM) \
		--tag $(REGISTRY)/postgres-backup:$(call settag,$(POSTGRES_BACKUP_TAG)) \
		--cache-from type=registry,ref=$(REGISTRY)/postgres-backup:latest \
		--cache-to type=inline \

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.11.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}

@@ -13,6 +13,9 @@ spec:
  jobTemplate:
    spec:
      backoffLimit: 2
      template:
        spec:
          restartPolicy: OnFailure
      template:
        metadata:
          annotations:

@@ -21,7 +24,7 @@ spec:
        spec:
          imagePullSecrets:
          - name: {{ .Release.Name }}-regsecret
          restartPolicy: OnFailure
          restartPolicy: Never
          containers:
          - name: pgdump
            image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"

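For context on the retry semantics: with `restartPolicy: Never` each retry is a fresh pod rather than an in-place container restart, and `backoffLimit: 2` caps the Job at two retries (three pod attempts in total). A minimal sketch of the resulting structure:

```yaml
jobTemplate:
  spec:
    backoffLimit: 2              # give up after 2 retries (3 attempts total)
    template:
      spec:
        restartPolicy: Never     # failed attempts get a new pod
```
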
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.0
version: 0.5.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.6.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.3.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
    "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
    "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
    "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}

@@ -4,4 +4,4 @@ description: Separated tenant namespace
icon: /logos/tenant.svg

type: application
version: 1.9.2
version: 1.10.0

@@ -4,55 +4,36 @@ A tenant is the main unit of security on the platform. The closest analogy would

Tenants can be created recursively and are subject to the following rules:

### Tenant naming
### Higher-level tenants can access lower-level ones.

Tenant names must be alphanumeric.
Using dashes (`-`) in tenant names is not allowed, unlike with other services.
This limitation exists to keep consistent naming in tenants, nested tenants, and services deployed in them.
Higher-level tenants can view and manage the applications of all their children.

For example:
### Each tenant has its own domain

- The root tenant is named `root`, but internally it's referenced as `tenant-root`.
- A nested tenant could be named `foo`, which would result in `tenant-foo` in service names and URLs.
- However, a tenant can not be named `foo-bar`, because parsing names such as `tenant-foo-bar` would be ambiguous.

### Unique domains

Each tenant has its own domain.
By default, (unless otherwise specified), it inherits the domain of its parent with a prefix of its name.
For example, if the parent had the domain `example.org`, then `tenant-foo` would get the domain `foo.example.org` by default.
By default (unless otherwise specified), it inherits the domain of its parent with a prefix of its name, for example, if the parent had the domain `example.org`, then `tenant-foo` would get the domain `foo.example.org` by default.

Kubernetes clusters created in this tenant namespace would get domains like: `kubernetes-cluster.foo.example.org`

Example:
```text
```
tenant-root (example.org)
└── tenant-foo (foo.example.org)
    └── kubernetes-cluster1 (kubernetes-cluster1.foo.example.org)
```

### Nesting tenants and reusing parent services
### Lower-level tenants can access the cluster services of their parent (provided they do not run their own)

Tenants can be nested.
A tenant administrator can create nested tenants using the "Tenant" application from the catalogue.

Higher-level tenants can view and manage the applications of all their children tenants.
If a tenant does not run their own cluster services, it can access ones of its parent.

For example, you create:
- Tenant `tenant-u1` with a set of services like `etcd`, `ingress`, `monitoring`.
- Tenant `tenant-u2` nested in `tenant-u1`.
Thus, you can create `tenant-u1` with a set of services like `etcd`, `ingress`, `monitoring`. And create another tenant namespace `tenant-u2` inside of `tenant-u1`.

Let's see what will happen when you run Kubernetes and Postgres under `tenant-u2` namespace.

Since `tenant-u2` does not have its own cluster services like `etcd`, `ingress`, and `monitoring`,
the applications running in `tenant-u2` will use the cluster services of the parent tenant.

Since `tenant-u2` does not have its own cluster services like `etcd`, `ingress`, and `monitoring`, the applications will use the cluster services of the parent tenant.
This in turn means:

- The Kubernetes cluster data will be stored in `etcd` for `tenant-u1`.
- Access to the cluster will be through the common `ingress` of `tenant-u1`.
- Essentially, all metrics will be collected in the `monitoring` from `tenant-u1`, and only that tenant will have access to them.
- The Kubernetes cluster data will be stored in etcd for `tenant-u1`.
- Access to the cluster will be through the common ingress of `tenant-u1`.
- Essentially, all metrics will be collected in the monitoring from `tenant-u1`, and only it will have access to them.


Example:
```

Some files were not shown because too many files have changed in this diff.