Mirror of https://github.com/outbackdingo/cozystack.git, synced 2026-02-05 00:15:51 +00:00

Compare commits: v0.30.5...(platform), 58 commits
| SHA1 |
|---|
| 2a77813de8 |
| d3a8a4a7de |
| fc2c5a0f6b |
| 0f8b8e1744 |
| 197434ff94 |
| 703073a164 |
| 6a0fc64475 |
| f1624353ef |
| 277b438f68 |
| 405863cb11 |
| 0ddaff9380 |
| a6b02bf381 |
| 39ede77fec |
| e505857832 |
| d8f3547db7 |
| 6d8a99269b |
| b9112a398e |
| 719fdd29cc |
| 9e1376f709 |
| 7a9a1fcba4 |
| 2def9f4e83 |
| c1046aae6a |
| 53cf1c537c |
| ccedcb7419 |
| f94a01febd |
| 495e584313 |
| 172e660cd1 |
| 14262cdd2a |
| 80576cb757 |
| fde6e9cc73 |
| 57ca60c5a5 |
| 1d0ee15948 |
| eeaa1b4517 |
| a14bcf98dd |
| be84fc6e4e |
| 73a3f481bc |
| 5903bbc64a |
| f204809e43 |
| fe4806ce49 |
| 8f535acc3f |
| 53cbb4ae12 |
| 4e9446d934 |
| acbfb6ad64 |
| 8570449080 |
| ffe6109dfb |
| 7dbb8a1d75 |
| 86210c1fc1 |
| e96f15773d |
| 5d71c90f0a |
| 05d6ab9516 |
| ccb001ee97 |
| 5a5cf91742 |
| 6a0d4913f2 |
| 685e50bf6c |
| f90fc6f681 |
| d8f3f2dee1 |
| da8100965f |
| 6d2ea1295e |
2 changes: .github/CODEOWNERS (vendored)

@@ -1 +1 @@
-* @kvaps @lllamnyp
+* @kvaps @lllamnyp @klinch0

49 changes: .github/workflows/backport.yaml (vendored, deleted)

@@ -1,49 +0,0 @@
name: Automatic Backport

on:
  pull_request_target:
    types: [closed] # fires when PR is closed (merged)

permissions:
  contents: write
  pull-requests: write

jobs:
  backport:
    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'backport')
    runs-on: [self-hosted]

    steps:
      # 1. Decide which maintenance branch should receive the backport
      - name: Determine target maintenance branch
        id: target
        uses: actions/github-script@v7
        with:
          script: |
            let rel;
            try {
              rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
            } catch (e) {
              core.setFailed('No existing releases found; cannot determine backport target.');
              return;
            }
            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
            const branch = `release-${maj}.${min}`;
            core.setOutput('branch', branch);
            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);

      # 2. Checkout (required by backport-action)
      - name: Checkout repository
        uses: actions/checkout@v4

      # 3. Create the backport pull request
      - name: Create backport PR
        uses: korthout/backport-action@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          label_pattern: '' # don't read labels for targets
          target_branches: ${{ steps.target.outputs.branch }}
100 changes: .github/workflows/pull-requests-release.yaml (vendored)

@@ -39,82 +39,38 @@ jobs:
    runs-on: [self-hosted]
    permissions:
      contents: write

    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      # Extract tag from branch name (branch = release-X.Y.Z*)
      - name: Extract tag from branch name
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
-            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
-            if (!m) {
-              core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
-              return;
-            }
-            const tag = `v${m[1]}`;
-            core.setOutput('tag', tag);
-            console.log(`✅ Tag to publish: ${tag}`);
+            const match = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
+            if (!match) {
+              core.setFailed(`Branch '${branch}' does not match expected format 'release-X.Y.Z[-suffix]'`);
+            } else {
+              const tag = `v${match[1]}`;
+              core.setOutput('tag', tag);
+              console.log(`✅ Extracted tag: ${tag}`);
+            }

+      # Checkout repo & create / push annotated tag
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

-      - name: Create tag on merge commit
+      - name: Create tag on merged commit
        run: |
-          git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
-          git push -f origin ${{ steps.get_tag.outputs.tag }}
+          git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }} --force
+          git push origin ${{ steps.get_tag.outputs.tag }} --force

-      # Get the latest published release
-      - name: Get the latest published release
-        id: latest_release
-        uses: actions/github-script@v7
-        with:
-          script: |
-            try {
-              const rel = await github.rest.repos.getLatestRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo
-              });
-              core.setOutput('tag', rel.data.tag_name);
-            } catch (_) {
-              core.setOutput('tag', '');
-            }

-      # Compare current tag vs latest using semver-utils
-      - name: Semver compare
-        id: semver
-        uses: madhead/semver-utils@v4.3.0
-        with:
-          version: ${{ steps.get_tag.outputs.tag }}
-          compare-to: ${{ steps.latest_release.outputs.tag }}

-      # Derive flags: prerelease? make_latest?
-      - name: Calculate publish flags
-        id: flags
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc1
-            const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
-            if (!m) {
-              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
-              return;
-            }
-            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc1
-            const isRc = Boolean(m[2]);
-            core.setOutput('is_rc', isRc);
-            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
-            core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');

-      # Publish draft release with correct flags
      - name: Publish draft release
        uses: actions/github-script@v7
        with:

@@ -122,17 +78,19 @@ jobs:
          const tag = '${{ steps.get_tag.outputs.tag }}';
          const releases = await github.rest.repos.listReleases({
            owner: context.repo.owner,
            repo: context.repo.repo
          });
-          const draft = releases.data.find(r => r.tag_name === tag && r.draft);
-          if (!draft) throw new Error(`Draft release for ${tag} not found`);
+          const release = releases.data.find(r => r.tag_name === tag && r.draft);
+          if (!release) {
+            throw new Error(`Draft release with tag ${tag} not found`);
+          }

          await github.rest.repos.updateRelease({
            owner: context.repo.owner,
            repo: context.repo.repo,
-            release_id: draft.id,
-            draft: false,
-            prerelease: ${{ steps.flags.outputs.is_rc }},
-            make_latest: '${{ steps.flags.outputs.make_latest }}'
+            release_id: release.id,
+            draft: false
          });

-          console.log(`🚀 Published release for ${tag}`);
+          console.log(`✅ Published release for ${tag}`);
25 changes: .github/workflows/pull-requests.yaml (vendored)

@@ -12,20 +12,9 @@ jobs:
      contents: read
      packages: write

-    # ─────────────────────────────────────────────────────────────
-    # Run automatically for internal PRs (same repo).
-    # For external PRs (forks) require the "ok-to-test" label.
-    # Never run when the PR carries the "release" label.
-    # ─────────────────────────────────────────────────────────────
    if: |
-      !contains(github.event.pull_request.labels.*.name, 'release') &&
-      (
-        github.event.pull_request.head.repo.full_name == github.repository ||
-        (
-          github.event.pull_request.head.repo.full_name != github.repository &&
-          contains(github.event.pull_request.labels.*.name, 'ok-to-test')
-        )
-      )
+      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
+      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Checkout code

@@ -41,8 +30,10 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

-      - name: Build
-        run: make build
+      - name: make build
+        run: |
+          make build

-      - name: Test
-        run: make test
+      - name: make test
+        run: |
+          make test
252 changes: .github/workflows/tags.yaml (vendored)

@@ -1,9 +1,10 @@
name: Versioned Tag

on:
+  # Trigger on push if it includes a tag like vX.Y.Z
  push:
    tags:
-      - 'v*.*.*' # vX.Y.Z or vX.Y.Z-rcN
+      - 'v*.*.*'

jobs:
  prepare-release:

@@ -15,7 +16,7 @@ jobs:
    pull-requests: write

    steps:
-      # Check if a non-draft release with this tag already exists
+      # 1) Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7

@@ -24,67 +25,57 @@ jobs:
          const tag = context.ref.replace('refs/tags/', '');
          const releases = await github.rest.repos.listReleases({
            owner: context.repo.owner,
            repo: context.repo.repo
          });
-          const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
-          core.setOutput('skip', exists);
-          console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);
+          const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
+          if (existing) {
+            core.setOutput('skip', 'true');
+          } else {
+            core.setOutput('skip', 'false');
+          }

+      # If a published release already exists, skip the rest of the workflow
+      - name: Skip if release already exists
+        if: steps.check_release.outputs.skip == 'true'
+        run: echo "Release already exists, skipping workflow."

-      # Parse tag meta-data (rc?, maintenance line, etc.)
-      - name: Parse tag
-        if: steps.check_release.outputs.skip == 'false'
-        id: tag
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc1
-            const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
-            if (!m) {
-              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
-              return;
-            }
-            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc1
-            const isRc = Boolean(m[2]);
-            const [maj, min] = m[1].split('.');
-            core.setOutput('tag', ref);
-            core.setOutput('version', version);
-            core.setOutput('is_rc', isRc);
-            core.setOutput('line', `${maj}.${min}`); // 0.31

-      # Detect base branch (main or release-X.Y) the tag was pushed from
+      # 2) Determine the base branch from which the tag was pushed
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
        uses: actions/github-script@v7
        with:
          script: |
            /*
              For a push event with a tag, GitHub sets context.payload.base_ref
              if the tag was pushed from a branch.
              If it's empty, we can't determine the correct base branch and must fail.
            */
            const baseRef = context.payload.base_ref;
            if (!baseRef) {
-              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
+              core.setFailed(`❌ base_ref is empty. Make sure you push the tag from a branch (e.g. 'git push origin HEAD:refs/tags/vX.Y.Z').`);
              return;
            }
-            const branch = baseRef.replace('refs/heads/', '');
-            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
-            if (!ok) {
-              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
-              return;
-            }
-            core.setOutput('branch', branch);
+            const shortBranch = baseRef.replace("refs/heads/", "");
+            const releasePattern = /^release-\d+\.\d+$/;
+            if (shortBranch !== "main" && !releasePattern.test(shortBranch)) {
+              core.setFailed(`❌ Tagged commit must belong to branch 'main' or 'release-X.Y'. Got '${shortBranch}'`);
+              return;
+            }
+            core.setOutput('branch', shortBranch);

-      # Checkout & login once
+      # 3) Checkout full git history and tags
      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

-      - name: Login to GHCR
+      # 4) Login to GitHub Container Registry
+      - name: Login to GitHub Container Registry
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:

@@ -92,160 +83,113 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

-      # Build project artifacts
+      # 5) Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build

-      # Commit built artifacts
+      # 6) Optionally commit built artifacts to the repository
      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        env:
          GIT_AUTHOR_NAME: ${{ github.actor }}
          GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
          git push origin HEAD || true

-      # Get `latest_version` from latest published release
-      - name: Get latest published release
-        if: steps.check_release.outputs.skip == 'false'
-        id: latest_release
-        uses: actions/github-script@v7
-        with:
-          script: |
-            try {
-              const rel = await github.rest.repos.getLatestRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo
-              });
-              core.setOutput('tag', rel.data.tag_name);
-            } catch (_) {
-              core.setOutput('tag', '');
-            }

-      # Compare tag (A) with latest (B)
-      - name: Semver compare
-        if: steps.check_release.outputs.skip == 'false'
-        id: semver
-        uses: madhead/semver-utils@v4.3.0
-        with:
-          version: ${{ steps.tag.outputs.tag }} # A
-          compare-to: ${{ steps.latest_release.outputs.tag }} # B

-      # Create or reuse DRAFT GitHub Release
-      - name: Create / reuse draft release
-        if: steps.check_release.outputs.skip == 'false'
-        id: release
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const tag = '${{ steps.tag.outputs.tag }}';
-            const isRc = ${{ steps.tag.outputs.is_rc }};
-            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
-            const makeLatest = outdated ? false : 'legacy';
-            const releases = await github.rest.repos.listReleases({
-              owner: context.repo.owner,
-              repo: context.repo.repo
-            });
-            let rel = releases.data.find(r => r.tag_name === tag);
-            if (!rel) {
-              rel = await github.rest.repos.createRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag_name: tag,
-                name: tag,
-                draft: true,
-                prerelease: isRc,
-                make_latest: makeLatest
-              });
-              console.log(`Draft release created for ${tag}`);
-            } else {
-              console.log(`Re-using existing release ${tag}`);
-            }
-            core.setOutput('upload_url', rel.upload_url);

-      # Build + upload assets (optional)
-      - name: Build & upload assets
-        if: steps.check_release.outputs.skip == 'false'
-        run: |
-          make assets
-          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

-      # Ensure long-lived maintenance branch release-X.Y
-      - name: Ensure maintenance branch release-${{ steps.tag.outputs.line }}
-        if: |
-          steps.check_release.outputs.skip == 'false' &&
-          steps.get_base.outputs.branch == 'main'
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const branch = `release-${'${{ steps.tag.outputs.line }}'}`;
-            try {
-              await github.rest.repos.getBranch({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                branch
-              });
-              console.log(`Branch '${branch}' already exists`);
-            } catch (_) {
-              await github.git.createRef({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                ref: `refs/heads/${branch}`,
-                sha: context.sha
-              });
-              console.log(`Branch '${branch}' created at ${context.sha}`);
-            }

-      # Create release-X.Y.Z branch and push (force-update)
+      # 7) Create a release branch like release-X.Y.Z
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
-          BRANCH="release-${GITHUB_REF#refs/tags/v}"
-          git branch -f "$BRANCH"
-          git push -f origin "$BRANCH"
+          BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
+          git branch -f "$BRANCH_NAME"
+          git push origin "$BRANCH_NAME" --force

-      # Create pull request into original base branch (if absent)
+      # 8) Create a pull request from release-X.Y.Z to the original base branch
      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              base
            });

            if (prs.data.length === 0) {
-              const pr = await github.rest.pulls.create({
+              const newPr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                head,
                base,
                title: `Release v${version}`,
-                body: `This PR prepares the release \`v${version}\`.`,
+                body:
+                  `This PR prepares the release \`v${version}\`.\n` +
+                  `(Please merge it before releasing draft)`,
                draft: false
              });
+              console.log(`Created pull request #${newPr.data.number} from ${head} to ${base}`);
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
-                issue_number: pr.data.number,
+                issue_number: newPr.data.number,
                labels: ['release']
              });
-              console.log(`Created PR #${pr.data.number}`);
            } else {
-              console.log(`PR already exists from ${head} to ${base}`);
+              console.log(`Pull request already exists from ${head} to ${base}`);
            }

-      # Run tests
-      - name: Test
-        if: steps.check_release.outputs.skip == 'false'
-        run: make test

+      # 9) Create or reuse an existing draft GitHub release for this tag
+      - name: Create or reuse draft release
+        if: steps.check_release.outputs.skip == 'false'
+        id: create_release
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const tag = context.ref.replace('refs/tags/', '');
+            const releases = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo
+            });

+            let release = releases.data.find(r => r.tag_name === tag);
+            if (!release) {
+              release = await github.rest.repos.createRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                tag_name: tag,
+                name: `${tag}`,
+                draft: true,
+                prerelease: false
+              });
+            }
+            core.setOutput('upload_url', release.upload_url);

+      # 10) Build additional assets for the release (if needed)
+      - name: Build assets
+        if: steps.check_release.outputs.skip == 'false'
+        run: make assets
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

+      # 11) Upload assets to the draft release
+      - name: Upload assets
+        if: steps.check_release.outputs.skip == 'false'
+        run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

+      # 12) Run tests
+      - name: Run tests
+        if: steps.check_release.outputs.skip == 'false'
+        run: make test
139 changes: docs/release.md (new file)

@@ -0,0 +1,139 @@
# Release Workflow

This section explains how Cozystack builds and releases are made.

## Regular Releases

When making a regular release, we take a commit in `main` and decide to make it release `x.y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
```

A regular release sequence starts in the following way:

1. Maintainer tags a commit in `main` with `v0.42.0` and pushes it to GitHub (see the sketch after this list).
2. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.0`, if it wasn't created before.
    2. Takes code from tag `v0.42.0`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.0` with updated digests, pushes it to the new branch `release-0.42.0`, and opens a PR to `main`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.0` and uploads them to the release draft page.
3. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.
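A minimal sketch of step 1 (the tag name is illustrative). The tag should be pushed from its branch, since the tag-triggered CI workflow reads `base_ref` to determine where the tag came from:

```bash
# Tag the chosen commit on main and push the tag from the branch,
# so GitHub sets base_ref for the tag-triggered workflow.
git checkout main
git pull origin main
git tag v0.42.0
git push origin HEAD:refs/tags/v0.42.0
```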
```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Pull Request"
```

When testing and editing are completed, the sequence goes on.

4. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.0`.
5. CI workflow triggers on merge:
    1. Moves the tag `v0.42.0` to the newly created merge commit by force-pushing the tag to GitHub (see the sketch after the diagram below).
    2. Publishes the release page (`draft` → `latest`).
6. The maintainer can now announce the release to the community.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Release v0.42.0" tag: "v0.42.0"
```
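For reference, the merge-triggered workflow re-points the tag roughly like this; it is performed by CI, not by the maintainer, and `<merge-commit-sha>` stands in for the merge commit on `main`:

```bash
# Move the existing tag to the merge commit and force-push it.
git tag -f v0.42.0 <merge-commit-sha>
git push -f origin v0.42.0
```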
## Patch Releases

Making a patch release has a lot in common with a regular release, with a couple of differences:

* A release branch is used instead of `main`.
* Patch commits are cherry-picked to the release branch.
* A pull request is opened against the release branch.

Let's assume that we've released `v0.42.0` and that development is ongoing.
We have introduced a couple of new features and some fixes to features that were released
in `v0.42.0`.

Once problems are found and fixed, a patch release is due.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
```

1. The maintainer creates a release branch, `release-0.42`, and cherry-picks patch commits from `main` to `release-0.42`.
    These must be only patches to features that were present in version `v0.42.0`.

    Cherry-picking can be done as soon as each patch is merged into `main`,
    or directly before the release (see the sketch after the diagram below).
```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2"
```

When all relevant patch commits are cherry-picked, the branch is ready for release.
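A minimal sketch of the cherry-picking flow described above; the branch name and commit SHAs are illustrative:

```bash
# Create the maintenance branch from the release tag (first patch release only).
git checkout -b release-0.42 v0.42.0
git push origin release-0.42

# Cherry-pick each relevant patch commit from main.
git cherry-pick <sha-of-patch-1>
git cherry-pick <sha-of-patch-2>
git push origin release-0.42
```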
2. The maintainer tags the `HEAD` commit of branch `release-0.42` as `v0.42.1` and then pushes it to GitHub.
3. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.1`, if it wasn't created before.
    2. Takes code from tag `v0.42.1`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.1` with updated digests, pushes it to the new branch `release-0.42.1`, and opens a PR to `release-0.42`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.1` and uploads them to the release draft page.
4. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2" tag: "v0.42.1"
    branch release-0.42.1
    commit id: "Prepare release v0.42.1"
    checkout release-0.42
    merge release-0.42.1 id: "Pull request"
```

Finally, when the release is confirmed, the release sequence goes on.

5. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.1`.
6. CI workflow triggers on merge:
    1. Moves the tag `v0.42.1` to the newly created merge commit by force-pushing the tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
7. The maintainer can now announce the release to the community.
@@ -60,7 +60,7 @@ done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
-  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
+  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi

@@ -234,8 +234,8 @@ sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

# Wait for Cluster-API providers
-timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
-kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
+timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
+kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller

@@ -357,5 +357,5 @@ kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
  "oidc-enabled": "true"
}}'

-timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
+timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
@@ -39,15 +39,6 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
    }

    t := getMonitoredObject(w)

-   if t == nil {
-       err = r.Delete(ctx, w)
-       if err != nil {
-           logger.Error(err, "failed to delete workload")
-       }
-       return ctrl.Result{}, err
-   }

    err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

    // found object, nothing to do

@@ -77,23 +68,20 @@ func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
}

func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
-   switch {
-   case strings.HasPrefix(w.Name, "pvc-"):
+   if strings.HasPrefix(w.Name, "pvc-") {
        obj := &corev1.PersistentVolumeClaim{}
        obj.Name = strings.TrimPrefix(w.Name, "pvc-")
        obj.Namespace = w.Namespace
        return obj
-   case strings.HasPrefix(w.Name, "svc-"):
+   }
+   if strings.HasPrefix(w.Name, "svc-") {
        obj := &corev1.Service{}
        obj.Name = strings.TrimPrefix(w.Name, "svc-")
        obj.Namespace = w.Namespace
        return obj
-   case strings.HasPrefix(w.Name, "pod-"):
-       obj := &corev1.Pod{}
-       obj.Name = strings.TrimPrefix(w.Name, "pod-")
-       obj.Namespace = w.Namespace
-       return obj
    }
-   var obj client.Object
+   obj := &corev1.Pod{}
+   obj.Name = w.Name
+   obj.Namespace = w.Namespace
    return obj
}
@@ -1,26 +0,0 @@
package controller

import (
    "testing"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
    corev1 "k8s.io/api/core/v1"
)

func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "unprefixed-name"
    obj := getMonitoredObject(w)
    if obj != nil {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
    }
}

func TestPodMonitoredObject(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "pod-mypod"
    obj := getMonitoredObject(w)
    if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
    }
}
@@ -116,15 +116,24 @@ func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(

    resources := make(map[string]resource.Quantity)

-   q := resource.MustParse("0")
+   quantity := resource.MustParse("0")

    for _, ing := range svc.Status.LoadBalancer.Ingress {
        if ing.IP != "" {
-           q.Add(resource.MustParse("1"))
+           quantity.Add(resource.MustParse("1"))
        }
    }

-   resources["public-ips"] = q
+   var resourceLabel string
+   if svc.Annotations != nil {
+       var ok bool
+       resourceLabel, ok = svc.Annotations["metallb.universe.tf/ip-allocated-from-pool"]
+       if !ok {
+           resourceLabel = "default"
+       }
+   }
+   resourceLabel = fmt.Sprintf("%s.ipaddresspool.metallb.io/requests.ipaddresses", resourceLabel)
+   resources[resourceLabel] = quantity

    _, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
        // Update owner references with the new monitor

@@ -165,7 +174,12 @@ func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
    resources := make(map[string]resource.Quantity)

    for resourceName, resourceQuantity := range pvc.Status.Capacity {
-       resources[resourceName.String()] = resourceQuantity
+       storageClass := "default"
+       if pvc.Spec.StorageClassName != nil || *pvc.Spec.StorageClassName == "" {
+           storageClass = *pvc.Spec.StorageClassName
+       }
+       resourceLabel := fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.%s", storageClass, resourceName.String())
+       resources[resourceLabel] = resourceQuantity
    }

    _, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {

@@ -198,12 +212,15 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
) error {
    logger := log.FromContext(ctx)

-   // totalResources will store the sum of all container resource requests
+   // Combine both init containers and normal containers to sum resources properly
+   combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)

+   // totalResources will store the sum of all container resource limits
    totalResources := make(map[string]resource.Quantity)

-   // Iterate over all containers to aggregate their requests
-   for _, container := range pod.Spec.Containers {
-       for name, qty := range container.Resources.Requests {
+   // Iterate over all containers to aggregate their Limits
+   for _, container := range combinedContainers {
+       for name, qty := range container.Resources.Limits {
            if existing, exists := totalResources[name.String()]; exists {
                existing.Add(qty)
                totalResources[name.String()] = existing

@@ -232,7 +249,7 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(

    workload := &cozyv1alpha1.Workload{
        ObjectMeta: metav1.ObjectMeta{
-           Name:      fmt.Sprintf("pod-%s", pod.Name),
+           Name:      pod.Name,
            Namespace: pod.Namespace,
        },
    }
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:4da14241052d2c4bd29d1766c4a569446f808a19538ef7f6acc05a981913df8e
+ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.1@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
+ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.1@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
+ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.1@sha256:5717919c75e609902c6d67138311a2a8fd07be822e2173f3802b67cf5f3486e9
+ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:6359b7877f04c6ac6641c0ebcc2a1d03cabfe1718464cd43f82e97724ad6aad8
+ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.10.1
+version: 0.10.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
@@ -13,6 +13,9 @@ spec:
  jobTemplate:
    spec:
      backoffLimit: 2
+      template:
+        spec:
+          restartPolicy: OnFailure
      template:
        metadata:
          annotations:

@@ -21,7 +24,7 @@ spec:
        spec:
          imagePullSecrets:
            - name: {{ .Release.Name }}-regsecret
-          restartPolicy: OnFailure
+          restartPolicy: Never
          containers:
            - name: pgdump
              image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
@@ -4,4 +4,4 @@ description: Separated tenant namespace
icon: /logos/tenant.svg

type: application
-version: 1.9.2
+version: 1.10.0

7 changes: packages/apps/tenant/templates/configuration-hash.yaml (new file)

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozy-tenant-configuration-hash
  namespace: {{ include "tenant.name" . }}
data:
  cozyTenantConfigurationHash: {{ sha256sum (toJson .Values) | quote }}
@@ -24,7 +24,6 @@ spec:
  ingress:
    - fromEntities:
        - world
-        - cluster
  egress:
    - toEntities:
        - world
@@ -89,8 +89,7 @@ postgres 0.7.0 4b90bf5a
postgres 0.7.1 1ec10165
postgres 0.8.0 4e68e65c
postgres 0.9.0 8267072d
-postgres 0.10.0 721c12a7
-postgres 0.10.1 HEAD
+postgres 0.10.0 HEAD
rabbitmq 0.1.0 263e47be
rabbitmq 0.2.0 53f2365e
rabbitmq 0.3.0 6c5cf5bf

@@ -132,7 +131,7 @@ tenant 1.7.0 24fa7222
tenant 1.8.0 160e4e2a
tenant 1.9.0 728743db
tenant 1.9.1 721c12a7
-tenant 1.9.2 HEAD
+tenant 1.10.0 HEAD
virtual-machine 0.1.4 f2015d65
virtual-machine 0.1.5 263e47be
virtual-machine 0.2.0 c0685f43

@@ -145,8 +144,7 @@ virtual-machine 0.7.1 0ab39f20
virtual-machine 0.8.0 3fa4dd3a
virtual-machine 0.8.1 93c46161
virtual-machine 0.8.2 de19450f
-virtual-machine 0.9.0 721c12a7
-virtual-machine 0.9.1 HEAD
+virtual-machine 0.9.0 HEAD
vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 HEAD
vm-instance 0.1.0 1ec10165

@@ -156,8 +154,7 @@ vm-instance 0.4.0 e23286a3
vm-instance 0.4.1 0ab39f20
vm-instance 0.5.0 3fa4dd3a
vm-instance 0.5.1 de19450f
-vm-instance 0.6.0 721c12a7
-vm-instance 0.6.1 HEAD
+vm-instance 0.6.0 HEAD
vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf
@@ -17,7 +17,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.1
+version: 0.9.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -74,8 +74,7 @@ spec:
  {{- if .Values.gpus }}
  gpus:
    {{- range $i, $gpu := .Values.gpus }}
-    - name: gpu{{ add $i 1 }}
-      deviceName: {{ $gpu.name }}
+    - deviceName: {{ $gpu.name }}
    {{- end }}
  {{- end }}
  disks:

@@ -17,7 +17,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.1
+version: 0.6.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -46,8 +46,7 @@ spec:
  {{- if .Values.gpus }}
  gpus:
    {{- range $i, $gpu := .Values.gpus }}
-    - name: gpu{{ add $i 1 }}
-      deviceName: {{ $gpu.name }}
+    - deviceName: {{ $gpu.name }}
    {{- end }}
  {{- end }}
  disks:
@@ -30,6 +30,8 @@ FROM alpine:3.21

RUN apk add --no-cache make
RUN apk add helm kubectl --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
+RUN apk add yq
+RUN apk add coreutils

COPY scripts /cozystack/scripts
COPY --from=builder /src/packages/core /cozystack/packages/core

@@ -1,2 +1,2 @@
cozystack:
-  image: ghcr.io/cozystack/cozystack/installer:v0.30.4@sha256:d474e9c3f90dadb24f2fc325acfa42648053e2b21949c91169769795b8b8217c
+  image: ghcr.io/cozystack/cozystack/installer:v0.30.2@sha256:59996588b5d59b5593fb34442b2f2ed8ef466d138b229a8d37beb6f70141a690

@@ -7,7 +7,11 @@ show:
	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS)

apply:
-	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) | kubectl apply -f-
+	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) \
+		| kubectl apply -f-
+	kubectl delete helmreleases.helm.toolkit.fluxcd.io -l cozystack.io/marked-for-deletion=true -A

reconcile: apply

namespaces-show:
	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml
@@ -270,7 +270,10 @@ releases:
      {{- end }}
    {{- end }}
  {{- end }}
+    frontend:
+      resourcesPreset: "none"
+    dashboard:
+      resourcesPreset: "none"
  {{- $cozystackBranding := lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
  {{- $branding := dig "data" "branding" "" $cozystackBranding }}
  {{- if $branding }}

@@ -168,7 +168,10 @@ releases:
      {{- end }}
    {{- end }}
  {{- end }}
+    frontend:
+      resourcesPreset: "none"
+    dashboard:
+      resourcesPreset: "none"
  {{- $cozystackBranding := lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
  {{- $branding := dig "data" "branding" "" $cozystackBranding }}
  {{- if $branding }}

@@ -54,6 +54,12 @@ spec:
      namespace: cozy-public
      values:
        host: "{{ $host }}"
+      valuesFrom:
+        - kind: ConfigMap
+          name: "cozy-system-configuration-hash"
+          valuesKey: "cozyTenantConfigurationHash"
+          targetPath: "cozyTenantConfigurationHash"
+          optional: true
      dependsOn:
      {{- range $x := $bundle.releases }}
      {{- if has $x.name (list "cilium" "kubeovn") }}

14 changes: packages/core/platform/templates/configuration-hash.yaml (new file)

@@ -0,0 +1,14 @@
{{- $rootTenantConfiguration := dict "values" .Values }}
{{- $cozyConfig := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack" ) "data" }}
{{- $cozyScheduling := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling") "data" }}
{{- $cozyBranding := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" ) "data" }}
{{- $_ := set $rootTenantConfiguration "config" $cozyConfig }}
{{- $_ := set $rootTenantConfiguration "scheduling" $cozyScheduling }}
{{- $_ := set $rootTenantConfiguration "branding" $cozyBranding }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozy-system-configuration-hash
  namespace: tenant-root
data:
  cozyTenantConfigurationHash: {{ sha256sum (toJson $rootTenantConfiguration) | quote }}
@@ -7,12 +7,23 @@

{{/* collect dependency namespaces from releases */}}
{{- range $x := $bundle.releases }}
{{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
{{- end }}

{{- range $x := $bundle.releases }}
-{{- if not (has $x.name $disabledComponents) }}
-{{- if or (not $x.optional) (and ($x.optional) (has $x.name $enabledComponents)) }}
+{{- $shouldInstall := true }}
+{{- $shouldDelete := false }}
+{{- if or (has $x.name $disabledComponents) (and ($x.optional) (not (has $x.name $enabledComponents))) }}
+{{- $shouldInstall = false }}
+{{- if $.Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
+{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" $x.namespace $x.name }}
+{{- $shouldDelete = true }}
+{{- end }}
+{{- end }}
+{{- end }}

+{{- if or $shouldInstall $shouldDelete }}
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease

@@ -22,6 +33,9 @@ metadata:
  labels:
    cozystack.io/repository: system
    cozystack.io/system-app: "true"
+    {{- if $shouldDelete }}
+    cozystack.io/marked-for-deletion: "true"
+    {{- end }}
spec:
  interval: 5m
  releaseName: {{ $x.releaseName | default $x.name }}

@@ -47,10 +61,10 @@ spec:
  {{- end }}
  {{- $values := dict }}
  {{- with $x.values }}
  {{- $values = merge . $values }}
  {{- end }}
  {{- with index $cozyConfig.data (printf "values-%s" $x.name) }}
  {{- $values = merge (fromYaml .) $values }}
  {{- end }}
  {{- with $values }}
  values:

@@ -70,13 +84,12 @@ spec:

  {{- with $x.dependsOn }}
  dependsOn:
  {{- range $dep := . }}
  {{- if not (has $dep $disabledComponents) }}
  - name: {{ $dep }}
    namespace: {{ index $dependencyNamespaces $dep }}
  {{- end }}
  {{- end }}
  {{- end }}
{{- end }}
-{{- end }}
{{- end }}
@@ -1,2 +1,2 @@
e2e:
-  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.4@sha256:1f35a80c22b4ae3909216892e44f7ba50b00bd135b64081ffe5296eb936a5ca3
+  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.2@sha256:31273d6b42dc88c2be2ff9ba64564d1b12e70ae8a5480953341b0d113ac7d4bd

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/matchbox:v0.30.4@sha256:8eb6da7d616bd4f91fbe6a1bf3a4cb5448976c1ade2e1ecca9bf6a2bd1772851
+ghcr.io/cozystack/cozystack/matchbox:v0.30.2@sha256:307d382f75f1dcb39820c73b93b2ce576cdb6d58032679bda7d926999c677900

@@ -3,4 +3,4 @@ name: ingress
description: NGINX Ingress Controller
icon: /logos/ingress-nginx.svg
type: application
-version: 1.4.0
+version: 1.5.0

@@ -4,12 +4,13 @@

### Common parameters

| Name              | Description                                                        | Value   |
| ----------------- | ------------------------------------------------------------------ | ------- |
| `replicas`        | Number of ingress-nginx replicas                                   | `2`     |
| `externalIPs`     | List of externalIPs for service.                                   | `[]`    |
| `whitelist`       | List of client networks                                            | `[]`    |
| `clouflareProxy`  | Restoring original visitor IPs when Cloudflare proxied is enabled  | `false` |
| `dashboard`       | Should ingress serve Cozystack service dashboard                   | `false` |
| `cdiUploadProxy`  | Should ingress serve CDI upload proxy                              | `false` |
+| `virtExportProxy` | Should ingress serve KubeVirt export proxy                         | `false` |
@@ -35,6 +35,11 @@
      "type": "boolean",
      "description": "Should ingress serve CDI upload proxy",
      "default": false
+    },
+    "virtExportProxy": {
+      "type": "boolean",
+      "description": "Should ingress serve KubeVirt export proxy",
+      "default": false
    }
  }
}

@@ -30,3 +30,6 @@ dashboard: false

## @param cdiUploadProxy Should ingress serve CDI upload proxy
cdiUploadProxy: false
+
+## @param virtExportProxy Should ingress serve KubeVirt export proxy
+virtExportProxy: false
37 changes: packages/extra/ingress/vm-exportproxy.yaml (new file)

@@ -0,0 +1,37 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $issuerType := (index $cozyConfig.data "clusterissuer") | default "http01" }}

{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}

{{- if .Values.virtExportProxy }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    cert-manager.io/cluster-issuer: letsencrypt-prod
    {{- if eq $issuerType "cloudflare" }}
    {{- else }}
    acme.cert-manager.io/http01-ingress-class: {{ .Release.Namespace }}
    {{- end }}
  name: virt-exportproxy-{{ .Release.Namespace }}
  namespace: cozy-kubevirt
spec:
  ingressClassName: {{ .Release.Namespace }}
  rules:
    - host: virt-exportproxy.{{ $host }}
      http:
        paths:
          - backend:
              service:
                name: virt-exportproxy
                port:
                  number: 443
            path: /
            pathType: ImplementationSpecific
  tls:
    - hosts:
        - virt-exportproxy.{{ $host }}
      secretName: virt-exportproxy-{{ .Release.Namespace }}-tls
{{- end }}
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:24382d445bf7a39ed988ef4dc7a0d9f084db891fcb5f42fd2e64622710b9457e
|
||||
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:c63978e1ed0304e8518b31ddee56c4e8115541b997d8efbe1c0a74da57140399
|
||||
|
||||
@@ -16,7 +16,8 @@ ingress 1.0.0 d7cfa53c
|
||||
ingress 1.1.0 5bbc488e
|
||||
ingress 1.2.0 28fca4ef
|
||||
ingress 1.3.0 fde4bcfa
|
||||
ingress 1.4.0 HEAD
|
||||
ingress 1.4.0 fd240701
|
||||
ingress 1.5.0 HEAD
|
||||
monitoring 1.0.0 d7cfa53c
|
||||
monitoring 1.1.0 25221fdc
|
||||
monitoring 1.2.0 f81be075
|
||||
|
||||
@@ -79,7 +79,7 @@ annotations:
|
||||
Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that can
|
||||
be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n"
|
||||
apiVersion: v2
|
||||
appVersion: 1.17.2
|
||||
appVersion: 1.17.3
|
||||
description: eBPF-based Networking, Security, and Observability
|
||||
home: https://cilium.io/
|
||||
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
|
||||
@@ -95,4 +95,4 @@ kubeVersion: '>= 1.21.0-0'
|
||||
name: cilium
|
||||
sources:
|
||||
- https://github.com/cilium/cilium
|
||||
version: 1.17.2
|
||||
version: 1.17.3
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# cilium
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
Cilium is open source software for providing and transparently securing
|
||||
network connectivity and loadbalancing between application workloads such as
|
||||
@@ -85,7 +85,7 @@ contributors across the globe, there is almost always someone available to help.
 | authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
 | authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
 | authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. |
-| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:498a000f370d8c37927118ed80afe8adc38d1edcbfc071627d17b25c88efcab0","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
+| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:37f7b378a29ceb4c551b1b5582e27747b855bbfaa73fa11914fe0df028dc581f","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
 | authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
 | authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
 | authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations |
@@ -197,7 +197,7 @@ contributors across the globe, there is almost always someone available to help.
 | clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
 | clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
 | clustermesh.apiserver.healthPort | int | `9880` | TCP port for the clustermesh-apiserver health API. |
-| clustermesh.apiserver.image | object | `{"digest":"sha256:981250ebdc6e66e190992eaf75cfca169113a8f08d5c3793fe15822176980398","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.2","useDigest":true}` | Clustermesh API server image. |
+| clustermesh.apiserver.image | object | `{"digest":"sha256:98d5feaf67dd9b5d8d219ff5990de10539566eedc5412bcf52df75920896ad42","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.3","useDigest":true}` | Clustermesh API server image. |
 | clustermesh.apiserver.kvstoremesh.enabled | bool | `true` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
 | clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
 | clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
@@ -377,7 +377,7 @@ contributors across the globe, there is almost always someone available to help.
 | envoy.healthPort | int | `9878` | TCP port for the health API. |
 | envoy.httpRetryCount | int | `3` | Maximum number of retries for each HTTP request |
 | envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
-| envoy.image | object | `{"digest":"sha256:377c78c13d2731f3720f931721ee309159e782d882251709cb0fac3b42c03f4b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.31.5-1741765102-efed3defcc70ab5b263a0fc44c93d316b846a211","useDigest":true}` | Envoy container image. |
+| envoy.image | object | `{"digest":"sha256:a01cadf7974409b5c5c92ace3d6afa298408468ca24cab1cb413c04f89d3d1f9","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.32.5-1744305768-f9ddca7dcd91f7ca25a505560e655c47d3dec2cf","useDigest":true}` | Envoy container image. |
 | envoy.initialFetchTimeoutSeconds | int | `30` | Time in seconds after which the initial fetch on an xDS stream is considered timed out |
 | envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
 | envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
@@ -518,7 +518,7 @@ contributors across the globe, there is almost always someone available to help.
 | hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
 | hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
 | hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
-| hubble.relay.image | object | `{"digest":"sha256:42a8db5c256c516cacb5b8937c321b2373ad7a6b0a1e5a5120d5028433d586cc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.2","useDigest":true}` | Hubble-relay container image. |
+| hubble.relay.image | object | `{"digest":"sha256:f8674b5139111ac828a8818da7f2d344b4a5bfbaeb122c5dc9abed3e74000c55","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.3","useDigest":true}` | Hubble-relay container image. |
 | hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
 | hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
 | hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
@@ -625,7 +625,7 @@ contributors across the globe, there is almost always someone available to help.
 | hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
 | identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). |
 | identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
-| image | object | `{"digest":"sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.2","useDigest":true}` | Agent container image. |
+| image | object | `{"digest":"sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.3","useDigest":true}` | Agent container image. |
 | imagePullSecrets | list | `[]` | Configure image pull secrets for pulling container images |
 | ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
 | ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
@@ -762,7 +762,7 @@ contributors across the globe, there is almost always someone available to help.
 | operator.hostNetwork | bool | `true` | HostNetwork setting |
 | operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
 | operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
-| operator.image | object | `{"alibabacloudDigest":"sha256:7cb8c23417f65348bb810fe92fb05b41d926f019d77442f3fa1058d17fea7ffe","awsDigest":"sha256:955096183e22a203bbb198ca66e3266ce4dbc2b63f1a2fbd03f9373dcd97893c","azureDigest":"sha256:455fb88b558b1b8ba09d63302ccce76b4930581be89def027184ab04335c20e0","genericDigest":"sha256:81f2d7198366e8dec2903a3a8361e4c68d47d19c68a0d42f0b7b6e3f0523f249","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.2","useDigest":true}` | cilium-operator image. |
+| operator.image | object | `{"alibabacloudDigest":"sha256:e9a9ab227c6e833985bde6537b4d1540b0907f21a84319de4b7d62c5302eed5c","awsDigest":"sha256:40f235111fb2bca209ee65b12f81742596e881a0a3ee4d159776d78e3091ba7f","azureDigest":"sha256:6a3294ec8a2107048254179c3ac5121866f90d20fccf12f1d70960e61f304713","genericDigest":"sha256:8bd38d0e97a955b2d725929d60df09d712fb62b60b930551a29abac2dd92e597","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.3","useDigest":true}` | cilium-operator image. |
 | operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
 | operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
 | operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
@@ -812,7 +812,7 @@ contributors across the globe, there is almost always someone available to help.
 | preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
 | preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
 | preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
-| preflight.image | object | `{"digest":"sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.2","useDigest":true}` | Cilium pre-flight image. |
+| preflight.image | object | `{"digest":"sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.3","useDigest":true}` | Cilium pre-flight image. |
 | preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
 | preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
 | preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
@@ -191,10 +191,10 @@ image:
   # @schema
   override: ~
   repository: "quay.io/cilium/cilium"
-  tag: "v1.17.2"
+  tag: "v1.17.3"
   pullPolicy: "IfNotPresent"
   # cilium-digest
-  digest: "sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+  digest: "sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
   useDigest: true
 # -- Scheduling configurations for cilium pods
 scheduling:
@@ -1440,9 +1440,9 @@ hubble:
     # @schema
     override: ~
     repository: "quay.io/cilium/hubble-relay"
-    tag: "v1.17.2"
+    tag: "v1.17.3"
     # hubble-relay-digest
-    digest: "sha256:42a8db5c256c516cacb5b8937c321b2373ad7a6b0a1e5a5120d5028433d586cc"
+    digest: "sha256:f8674b5139111ac828a8818da7f2d344b4a5bfbaeb122c5dc9abed3e74000c55"
     useDigest: true
     pullPolicy: "IfNotPresent"
   # -- Specifies the resources for the hubble-relay pods
@@ -2351,9 +2351,9 @@ envoy:
     # @schema
     override: ~
     repository: "quay.io/cilium/cilium-envoy"
-    tag: "v1.31.5-1741765102-efed3defcc70ab5b263a0fc44c93d316b846a211"
+    tag: "v1.32.5-1744305768-f9ddca7dcd91f7ca25a505560e655c47d3dec2cf"
     pullPolicy: "IfNotPresent"
-    digest: "sha256:377c78c13d2731f3720f931721ee309159e782d882251709cb0fac3b42c03f4b"
+    digest: "sha256:a01cadf7974409b5c5c92ace3d6afa298408468ca24cab1cb413c04f89d3d1f9"
    useDigest: true
  # -- Additional containers added to the cilium Envoy DaemonSet.
  extraContainers: []
@@ -2708,15 +2708,15 @@ operator:
     # @schema
     override: ~
     repository: "quay.io/cilium/operator"
-    tag: "v1.17.2"
+    tag: "v1.17.3"
     # operator-generic-digest
-    genericDigest: "sha256:81f2d7198366e8dec2903a3a8361e4c68d47d19c68a0d42f0b7b6e3f0523f249"
+    genericDigest: "sha256:8bd38d0e97a955b2d725929d60df09d712fb62b60b930551a29abac2dd92e597"
     # operator-azure-digest
-    azureDigest: "sha256:455fb88b558b1b8ba09d63302ccce76b4930581be89def027184ab04335c20e0"
+    azureDigest: "sha256:6a3294ec8a2107048254179c3ac5121866f90d20fccf12f1d70960e61f304713"
     # operator-aws-digest
-    awsDigest: "sha256:955096183e22a203bbb198ca66e3266ce4dbc2b63f1a2fbd03f9373dcd97893c"
+    awsDigest: "sha256:40f235111fb2bca209ee65b12f81742596e881a0a3ee4d159776d78e3091ba7f"
     # operator-alibabacloud-digest
-    alibabacloudDigest: "sha256:7cb8c23417f65348bb810fe92fb05b41d926f019d77442f3fa1058d17fea7ffe"
+    alibabacloudDigest: "sha256:e9a9ab227c6e833985bde6537b4d1540b0907f21a84319de4b7d62c5302eed5c"
     useDigest: true
     pullPolicy: "IfNotPresent"
     suffix: ""
@@ -2991,9 +2991,9 @@ preflight:
     # @schema
     override: ~
     repository: "quay.io/cilium/cilium"
-    tag: "v1.17.2"
+    tag: "v1.17.3"
     # cilium-digest
-    digest: "sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
+    digest: "sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
     useDigest: true
     pullPolicy: "IfNotPresent"
   # -- The priority class to use for the preflight pod.
@@ -3140,9 +3140,9 @@ clustermesh:
     # @schema
     override: ~
     repository: "quay.io/cilium/clustermesh-apiserver"
-    tag: "v1.17.2"
+    tag: "v1.17.3"
     # clustermesh-apiserver-digest
-    digest: "sha256:981250ebdc6e66e190992eaf75cfca169113a8f08d5c3793fe15822176980398"
+    digest: "sha256:98d5feaf67dd9b5d8d219ff5990de10539566eedc5412bcf52df75920896ad42"
     useDigest: true
     pullPolicy: "IfNotPresent"
   # -- TCP port for the clustermesh-apiserver health API.
@@ -3649,7 +3649,7 @@ authentication:
     override: ~
     repository: "docker.io/library/busybox"
     tag: "1.37.0"
-    digest: "sha256:498a000f370d8c37927118ed80afe8adc38d1edcbfc071627d17b25c88efcab0"
+    digest: "sha256:37f7b378a29ceb4c551b1b5582e27747b855bbfaa73fa11914fe0df028dc581f"
     useDigest: true
     pullPolicy: "IfNotPresent"
   # SPIRE agent configuration
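All of the image bumps above follow the same pattern: the tag moves to v1.17.3 and the matching sha256 digest is replaced, since useDigest: true makes the chart pull by digest rather than by tag. A minimal sketch of how such a digest can be looked up when preparing a bump like this, assuming crane (from go-containerregistry) or docker buildx is available; neither tool is part of this change:

# Resolve the digest a tag currently points to (illustrative):
crane digest quay.io/cilium/cilium:v1.17.3

# Alternatively, with docker buildx:
docker buildx imagetools inspect quay.io/cilium/cilium:v1.17.3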
@@ -1,2 +1,2 @@
-ARG VERSION=v1.17.2
+ARG VERSION=v1.17.3
 FROM quay.io/cilium/cilium:${VERSION}
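Because VERSION is declared as a build argument before FROM, the default above can be overridden at build time without editing the Dockerfile. A hedged sketch; the image name is only an illustration:

# Build with the default version baked into the Dockerfile:
docker build -t cilium-custom .

# Or override the base image version at build time:
docker build --build-arg VERSION=v1.17.3 -t cilium-custom .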
@@ -1,2 +1,2 @@
 cozystackAPI:
-  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.30.4@sha256:299b50de88aa945ab90ee41eeb1a0ac7ba20d858adacd1ef125af7d676ce440f
+  image: ghcr.io/cozystack/cozystack/cozystack-api:v0.30.2@sha256:7ef370dc8aeac0a6b2a50b7d949f070eb21d267ba0a70e7fc7c1564bfe6d4f83
@@ -1,5 +1,5 @@
 cozystackController:
-  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.30.4@sha256:a39395a6ce995d91bee8817c4032b8e073e0387f8b1e0de9d78909cb64189f80
+  image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.30.2@sha256:5b87a8ea0dcde1671f44532c1ee6db11a5dd922d1a009078ecf6495ec193e52a
   debug: false
   disableTelemetry: false
-cozystackVersion: "v0.30.4"
+cozystackVersion: "v0.30.2"
@@ -76,7 +76,7 @@ data:
       "kubeappsNamespace": {{ .Release.Namespace | quote }},
       "helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
       "carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
-      "appVersion": "v0.30.4",
+      "appVersion": "v0.30.2",
       "authProxyEnabled": {{ .Values.authProxy.enabled }},
       "oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
       "oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},
packages/system/dashboard/templates/vpa.yaml (new file, 80 lines)
@@ -0,0 +1,80 @@
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+  name: dashboard-internal-dashboard
+  namespace: cozy-dashboard
+spec:
+  targetRef:
+    apiVersion: "apps/v1"
+    kind: Deployment
+    name: dashboard-internal-dashboard
+  updatePolicy:
+    updateMode: "Auto"
+  resourcePolicy:
+    containerPolicies:
+    - containerName: dashboard
+      controlledResources: ["cpu", "memory"]
+      minAllowed:
+        cpu: 50m
+        memory: 64Mi
+      maxAllowed:
+        cpu: 500m
+        memory: 512Mi
+---
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+  name: dashboard-internal-kubeappsapis
+  namespace: cozy-dashboard
+spec:
+  targetRef:
+    apiVersion: "apps/v1"
+    kind: Deployment
+    name: dashboard-internal-kubeappsapis
+  updatePolicy:
+    updateMode: "Auto"
+  resourcePolicy:
+    containerPolicies:
+    - containerName: kubeappsapis
+      controlledResources: ["cpu", "memory"]
+      minAllowed:
+        cpu: 50m
+        memory: 100Mi
+      maxAllowed:
+        cpu: 1000m
+        memory: 1Gi
+---
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+  name: dashboard-vpa
+  namespace: cozy-dashboard
+spec:
+  targetRef:
+    apiVersion: "apps/v1"
+    kind: Deployment
+    name: dashboard
+  updatePolicy:
+    updateMode: "Auto"
+  resourcePolicy:
+    containerPolicies:
+    - containerName: nginx
+      controlledResources: ["cpu", "memory"]
+      minAllowed:
+        cpu: "50m"
+        memory: "64Mi"
+      maxAllowed:
+        cpu: "500m"
+        memory: "512Mi"
+{{- $dashboardKCconfig := lookup "v1" "ConfigMap" "cozy-dashboard" "kubeapps-auth-config" }}
+{{- $dashboardKCValues := dig "data" "values.yaml" "" $dashboardKCconfig }}
+{{- if $dashboardKCValues }}
+    - containerName: auth-proxy
+      controlledResources: ["cpu", "memory"]
+      minAllowed:
+        cpu: "50m"
+        memory: "64Mi"
+      maxAllowed:
+        cpu: "500m"
+        memory: "512Mi"
+{{- end }}
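With updateMode: "Auto", the VPA recommender will evict and resize these pods within the minAllowed/maxAllowed bounds. A quick way to see what it is recommending, sketched here on the assumption that the VPA CRDs and recommender are running in the cluster:

# List the autoscalers created by this template:
kubectl get vpa -n cozy-dashboard

# Inspect the current recommendation for one of them:
kubectl describe vpa dashboard-vpa -n cozy-dashboard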
@@ -15,17 +15,19 @@ kubeapps:
   flux:
     enabled: true
   dashboard:
+    resourcesPreset: "none"
     image:
       registry: ghcr.io/cozystack/cozystack
       repository: dashboard
-      tag: v0.30.4
+      tag: v0.30.2
       digest: "sha256:a83fe4654f547469cfa469a02bda1273c54bca103a41eb007fdb2e18a7a91e93"
   kubeappsapis:
+    resourcesPreset: "none"
     image:
       registry: ghcr.io/cozystack/cozystack
       repository: kubeapps-apis
-      tag: v0.30.4
-      digest: "sha256:a5ff1d3fdd69c78184554bd07e7675662201d7b27387717b4a70432a81db6301"
+      tag: v0.30.2
+      digest: "sha256:3b5805b56f2fb9fd25f4aa389cdfbbb28a3f2efb02245c52085a45d1dc62bf92"
   pluginConfig:
     flux:
       packages:
@@ -3,7 +3,7 @@ kamaji:
   deploy: false
   image:
     pullPolicy: IfNotPresent
-    tag: v0.30.4@sha256:aeb2d728818fd63a0ea2e64a79dcd6a8b01f0b8b0bab03e6e0fe3b9522edc0a3
+    tag: v0.30.2@sha256:e04f68e4cc5b023ed39ce2242b32aee51f97235371602239d0c4a9cea97c8d0d
     repository: ghcr.io/cozystack/cozystack/kamaji
   resources:
     limits:
@@ -216,6 +216,7 @@ data:
   values.yaml: |
     kubeapps:
       authProxy:
+        resourcesPreset: "none"
         enabled: true
         provider: "oidc"
         clientID: "kubeapps"
@@ -1,3 +1,3 @@
 portSecurity: true
 routes: ""
-image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.30.4@sha256:ec3e3b63ebefe5d11ad2b4a576c7b4cd0bbe6ab27ce2732e25e06c40692e8459
+image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.30.2@sha256:fa14fa7a0ffa628eb079ddcf6ce41d75b43de92e50f489422f8fb15c4dab2dbf
@@ -22,4 +22,4 @@ global:
   images:
     kubeovn:
       repository: kubeovn
-      tag: v1.13.8@sha256:385329464045cdf5e01364e9f2293edc71065a0910576a8e26ea9ac7097aae71
+      tag: v1.13.8@sha256:071a93df2dce484b347bbace75934ca9e1743668bfe6f1161ba307dee204767d
@@ -17,6 +17,7 @@ spec:
       - AutoResourceLimitsGate
       - CPUManager
       - GPU
+      - VMExport
     evictionStrategy: LiveMigrate
   customizeComponents: {}
   imagePullPolicy: IfNotPresent
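The newly enabled VMExport feature gate lets VM disks be exported through the virtualization API. A hedged usage sketch with virtctl; the export, VM, and output names are placeholders, not part of this change:

# Download the disk of a (hypothetical) VM named my-vm via a VirtualMachineExport:
virtctl vmexport download my-vm-export --vm=my-vm --output=disk.img.gz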
@@ -3,8 +3,8 @@ name: piraeus
 description: |
   The Piraeus Operator manages software defined storage clusters using LINSTOR in Kubernetes.
 type: application
-version: 2.7.1
-appVersion: "v2.7.1"
+version: 2.8.1
+appVersion: "v2.8.1"
 maintainers:
   - name: Piraeus Datastore
     url: https://piraeus.io
@@ -17,20 +17,19 @@ data:
     # quay.io/piraeusdatastore/piraeus-server:v1.24.2
     components:
       linstor-controller:
-        tag: v1.29.2
+        tag: v1.31.0
         image: piraeus-server
       linstor-satellite:
-        # Pin with digest to ensure we pull the version with downgraded thin-send-recv
-        tag: v1.29.2
+        tag: v1.31.0
         image: piraeus-server
       linstor-csi:
-        tag: v1.6.4
+        tag: v1.7.1
         image: piraeus-csi
       drbd-reactor:
-        tag: v1.6.0
+        tag: v1.8.0
         image: drbd-reactor
       ha-controller:
-        tag: v1.2.3
+        tag: v1.3.0
         image: piraeus-ha-controller
       drbd-shutdown-guard:
         tag: v1.0.0
@@ -39,7 +38,7 @@ data:
         tag: v0.11
         image: ktls-utils
       drbd-module-loader:
-        tag: v9.2.12
+        tag: v9.2.13
         # The special "match" attribute is used to select an image based on the node's reported OS.
         # The operator will first check the k8s node's ".status.nodeInfo.osImage" field, and compare it against the list
         # here. If one matches, that specific image name will be used instead of the fallback image.
@@ -90,25 +89,25 @@ data:
     base: registry.k8s.io/sig-storage
     components:
       csi-attacher:
-        tag: v4.7.0
+        tag: v4.8.1
         image: csi-attacher
       csi-livenessprobe:
-        tag: v2.14.0
+        tag: v2.15.0
         image: livenessprobe
       csi-provisioner:
-        tag: v5.1.0
+        tag: v5.2.0
         image: csi-provisioner
       csi-snapshotter:
-        tag: v8.1.0
+        tag: v8.2.1
         image: csi-snapshotter
       csi-resizer:
-        tag: v1.12.0
+        tag: v1.13.2
         image: csi-resizer
       csi-external-health-monitor-controller:
-        tag: v0.13.0
+        tag: v0.14.0
         image: csi-external-health-monitor-controller
       csi-node-driver-registrar:
-        tag: v2.12.0
+        tag: v2.13.0
         image: csi-node-driver-registrar
   {{- range $idx, $value := .Values.imageConfigOverride }}
   {{ add $idx 1 }}_helm_override.yaml: |
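The "match" mechanism described in the comments above selects the drbd-module-loader image based on each node's reported OS. To see the value the operator will compare against, a sketch:

# Show the osImage reported by each node, which the operator matches against:
kubectl get nodes -o custom-columns=NAME:.metadata.name,OS:.status.nodeInfo.osImage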
@@ -21,6 +21,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

+const (
+	CozySystemConfigurationHashConfigMapName = "cozy-system-configuration-hash"
+	CozyTenantConfigurationHashConfigMapName = "cozy-tenant-configuration-hash"
+	CozyTenantConfigurationHashKey           = "cozyTenantConfigurationHash"
+)
+
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

 // ApplicationList is a list of Application objects.

@@ -988,6 +988,18 @@ func (r *REST) convertApplicationToHelmRelease(app *appsv1alpha1.Application) (*
 		},
 	}

+	valuesFromConfigMap := appsv1alpha1.CozyTenantConfigurationHashConfigMapName
+	if helmRelease.Name == "tenant-root" && helmRelease.Namespace == "tenant-root" {
+		valuesFromConfigMap = appsv1alpha1.CozySystemConfigurationHashConfigMapName
+	}
+	helmRelease.Spec.ValuesFrom = []helmv2.ValuesReference{{
+		Kind:       "ConfigMap",
+		Name:       valuesFromConfigMap,
+		ValuesKey:  appsv1alpha1.CozyTenantConfigurationHashKey,
+		TargetPath: appsv1alpha1.CozyTenantConfigurationHashKey,
+		Optional:   true,
+	}}
+
 	return helmRelease, nil
 }
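The Go change above wires every generated HelmRelease to an optional ConfigMap reference, with tenant-root reading the system-level hash and all other releases the tenant-level one, so a change to the recorded configuration hash flows into the release values. One way to confirm the rendered reference on a live object, as a sketch (the release name is a placeholder except for tenant-root):

# Expect a valuesFrom entry pointing at cozy-tenant-configuration-hash
# (or cozy-system-configuration-hash for tenant-root):
kubectl get helmrelease tenant-root -n tenant-root -o jsonpath='{.spec.valuesFrom}'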
@@ -3,7 +3,7 @@ set -o pipefail
 set -e

 BUNDLE=$(set -x; kubectl get configmap -n cozy-system cozystack -o 'go-template={{index .data "bundle-name"}}')
-VERSION=10
+VERSION=$(find scripts/migrations -mindepth 1 -maxdepth 1 -type f | sort -V | awk -F/ 'END {print $NF+1}')

 run_migrations() {
   if ! kubectl get configmap -n cozy-system cozystack-version; then
@@ -70,7 +70,7 @@ make -C packages/core/platform namespaces-apply
 ensure_fluxcd

 # Install platform chart
-make -C packages/core/platform apply
+make -C packages/core/platform reconcile

 # Install basic charts
 if ! flux_is_ok; then
@@ -93,5 +93,5 @@ done
 trap 'exit' INT TERM
 while true; do
   sleep 60 & wait
-  make -C packages/core/platform apply
+  make -C packages/core/platform reconcile
 done
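Instead of a hard-coded VERSION=10, the installer now derives the next migration number from the files present under scripts/migrations. A self-contained demo of that pipeline (paths are illustrative):

# With files scripts/migrations/10 and scripts/migrations/11 present,
# sort -V orders them numerically and awk prints last+1, i.e. 12:
find scripts/migrations -mindepth 1 -maxdepth 1 -type f | sort -V | awk -F/ 'END {print $NF+1}'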
scripts/migrations/10 (new file, 15 lines)
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Migration 10 --> 11
+
+# Force reconcile hr keycloak-configure
+if kubectl get helmrelease keycloak-configure -n cozy-keycloak; then
+  kubectl delete po -l app=source-controller -n cozy-fluxcd
+  timestamp=$(date --rfc-3339=ns)
+  kubectl annotate helmrelease keycloak-configure -n cozy-keycloak \
+    reconcile.fluxcd.io/forceAt="$timestamp" \
+    reconcile.fluxcd.io/requestedAt="$timestamp" \
+    --overwrite
+fi
+
+# Write version to cozystack-version config
+kubectl create configmap -n cozy-system cozystack-version --from-literal=version=11 --dry-run=client -o yaml | kubectl apply -f-
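Setting reconcile.fluxcd.io/forceAt and requestedAt is the annotation-level way to force a Flux reconcile without the CLI. Where the flux CLI is available, roughly the same effect can be had with (a sketch, not part of this migration):

# Equivalent one-liner using the flux CLI, if installed:
flux reconcile helmrelease keycloak-configure -n cozy-keycloak --force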
scripts/migrations/11 (new file, 21 lines)
@@ -0,0 +1,21 @@
+#!/bin/sh
+# Migration 11 --> 12
+
+# Recreate daemonset kube-rbac-proxy
+
+if kubectl get daemonset kube-rbac-proxy -n cozy-monitoring; then
+  kubectl delete daemonset kube-rbac-proxy --cascade=orphan -n cozy-monitoring
+fi
+
+if kubectl get helmrelease monitoring-agents -n cozy-monitoring; then
+  timestamp=$(date --rfc-3339=ns)
+  kubectl annotate helmrelease monitoring-agents -n cozy-monitoring \
+    reconcile.fluxcd.io/forceAt="$timestamp" \
+    reconcile.fluxcd.io/requestedAt="$timestamp" \
+    --overwrite
+fi
+
+kubectl delete pods -l app.kubernetes.io/component=kube-rbac-proxy -n cozy-monitoring
+
+# Write version to cozystack-version config
+kubectl create configmap -n cozy-system cozystack-version --from-literal=version=12 --dry-run=client -o yaml | kubectl apply -f-
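After a migration run, the recorded schema version can be checked directly, as a sketch:

# Should print 12 once migration 11 has completed:
kubectl get configmap cozystack-version -n cozy-system -o jsonpath='{.data.version}'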