Compare commits

1 commit

f3b3266a30 by Andrei Kvapil, 2025-04-24 17:48:10 +02:00
[ci] Switch from pull-request to pull-request-target
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>

246 changed files with 1709 additions and 18076 deletions


@@ -1,8 +1,10 @@
 name: Pre-Commit Checks
 on:
-  pull_request:
+  pull_request_target:
     types: [labeled, opened, synchronize, reopened]
+    paths-ignore:
+      - '**.md'
 
 concurrency:
   group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
@@ -12,11 +14,13 @@ jobs:
   pre-commit:
     runs-on: ubuntu-22.04
     steps:
-      - name: Checkout code
+      - name: Checkout code (PR branch)
        uses: actions/checkout@v3
        with:
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          fetch-tags: true
      - name: Set up Python
        uses: actions/setup-python@v4
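The same pattern recurs in every workflow touched by this commit: `pull_request_target` runs the workflow definition from the base branch, which restores access to repository secrets for PRs from forks, but it checks out the base branch by default. The explicit `repository`/`ref` pair is therefore needed to actually test the PR's code. A minimal sketch of the combined shape (illustrative, not the full workflow):

```yaml
on:
  pull_request_target:
    types: [labeled, opened, synchronize, reopened]
jobs:
  checks:
    runs-on: ubuntu-22.04
    steps:
      # Without these two keys, checkout would fetch the base branch,
      # not the fork's proposed changes.
      - uses: actions/checkout@v4
        with:
          repository: ${{ github.event.pull_request.head.repo.full_name }}
          ref: ${{ github.event.pull_request.head.sha }}
```

Because the PR's code then runs with access to secrets, gating conditions such as the `release` label check below become part of the safety story.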


@@ -1,7 +1,7 @@
 name: Releasing PR
 on:
-  pull_request:
+  pull_request_target:
     types: [labeled, opened, synchronize, reopened, closed]
 
 concurrency:
@@ -16,16 +16,19 @@ jobs:
       contents: read
       packages: write
+    # Run only when the PR carries the "release" label and is not closed.
     if: |
       contains(github.event.pull_request.labels.*.name, 'release') &&
       github.event.action != 'closed'
     steps:
-      - name: Checkout code
+      - name: Checkout code (PR branch)
        uses: actions/checkout@v4
        with:
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          fetch-tags: true
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
@@ -34,64 +37,6 @@ jobs:
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: ghcr.io
-      - name: Extract tag from PR branch
-        id: get_tag
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const branch = context.payload.pull_request.head.ref;
-            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
-            if (!m) {
-              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
-              return;
-            }
-            const tag = `v${m[1]}`;
-            core.setOutput('tag', tag);
-      - name: Find draft release and get asset IDs
-        id: fetch_assets
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GH_PAT }}
-          script: |
-            const tag = '${{ steps.get_tag.outputs.tag }}';
-            const releases = await github.rest.repos.listReleases({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              per_page: 100
-            });
-            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
-            if (!draft) {
-              core.setFailed(`Draft release '${tag}' not found`);
-              return;
-            }
-            const findAssetId = (name) =>
-              draft.assets.find(a => a.name === name)?.id;
-            const installerId = findAssetId("cozystack-installer.yaml");
-            const diskId = findAssetId("nocloud-amd64.raw.xz");
-            if (!installerId || !diskId) {
-              core.setFailed("Missing required assets");
-              return;
-            }
-            core.setOutput("installer_id", installerId);
-            core.setOutput("disk_id", diskId);
-      - name: Download assets from GitHub API
-        run: |
-          mkdir -p _out/assets
-          curl -sSL \
-            -H "Authorization: token ${GH_PAT}" \
-            -H "Accept: application/octet-stream" \
-            -o _out/assets/cozystack-installer.yaml \
-            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.installer_id }}"
-          curl -sSL \
-            -H "Authorization: token ${GH_PAT}" \
-            -H "Accept: application/octet-stream" \
-            -o _out/assets/nocloud-amd64.raw.xz \
-            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.disk_id }}"
-        env:
-          GH_PAT: ${{ secrets.GH_PAT }}
      - name: Run tests
        run: make test
@@ -122,7 +67,7 @@ jobs:
             core.setOutput('tag', tag);
             console.log(`✅ Tag to publish: ${tag}`);
-      # Checkout repo & create / push annotated tag
+      # Checkout merged commit (default ref -> merge SHA)
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
@@ -137,7 +82,6 @@ jobs:
       - name: Ensure maintenance branch release-X.Y
         uses: actions/github-script@v7
         with:
-          github-token: ${{ secrets.GH_PAT }}
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
            const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
@@ -147,45 +91,21 @@ jobs:
             }
             const line = `${match[1]}.${match[2]}`;
             const branch = `release-${line}`;
-            // Get main branch commit for the tag
-            const ref = await github.rest.git.getRef({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              ref: `tags/${tag}`
-            });
-            const commitSha = ref.data.object.sha;
             try {
               await github.rest.repos.getBranch({
                 owner: context.repo.owner,
                 repo: context.repo.repo,
                 branch
               });
-              console.log(`Branch '${branch}' already exists`);
-              await github.rest.git.updateRef({
+            } catch (_) {
+              await github.rest.git.createRef({
                owner: context.repo.owner,
                repo: context.repo.repo,
-                ref: `heads/${branch}`,
-                sha: commitSha,
-                force: true
+                ref: `refs/heads/${branch}`,
+                sha: context.sha
              });
-              console.log(`🔁 Force-updated '${branch}' to ${commitSha}`);
-            } catch (err) {
-              if (err.status === 404) {
-                await github.rest.git.createRef({
-                  owner: context.repo.owner,
-                  repo: context.repo.repo,
-                  ref: `refs/heads/${branch}`,
-                  sha: commitSha
-                });
-                console.log(`✅ Created branch '${branch}' at ${commitSha}`);
-              } else {
-                console.error('Unexpected error --', err);
-                core.setFailed(`Unexpected error creating/updating branch: ${err.message}`);
-                throw err;
-              }
            }
      # Get the latest published release
@@ -218,13 +138,13 @@ jobs:
        uses: actions/github-script@v7
        with:
          script: |
-            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
-            const m = tag.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
+            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc1
+            const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
            if (!m) {
-              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
+              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
              return;
            }
-            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
+            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc1
            const isRc = Boolean(m[2]);
            core.setOutput('is_rc', isRc);
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
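For illustration, the tightened tag pattern can be checked from a shell (the character classes translate the JS regex above; behavior shown in comments):

```sh
for t in v0.31.5 v0.31.5-rc1 v0.31.5-rc.1 v0.31.5-beta.1; do
  if echo "$t" | grep -Eq '^v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$'; then
    echo "$t: accepted"
  else
    echo "$t: rejected"
  fi
done
# v0.31.5: accepted         v0.31.5-rc1: accepted
# v0.31.5-rc.1: rejected    (dotted rc form no longer matches)
# v0.31.5-beta.1: rejected  (alpha/beta forms no longer match)
```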


@@ -1,7 +1,7 @@
 name: Pull Request
 on:
-  pull_request:
+  pull_request_target:
     types: [labeled, opened, synchronize, reopened]
 
 concurrency:
@@ -9,8 +9,8 @@ concurrency:
   cancel-in-progress: true
 jobs:
-  build:
-    name: Build
+  e2e:
+    name: Build and Test
    runs-on: [self-hosted]
    permissions:
      contents: read
@@ -21,14 +21,13 @@ jobs:
       !contains(github.event.pull_request.labels.*.name, 'release')
     steps:
-      - name: Checkout code
+      - name: Checkout code (PR branch)
        uses: actions/checkout@v4
        with:
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
          fetch-tags: true
-      - name: Set up Docker config
-        run: cp -r ~/.docker ${{ runner.temp }}/.docker
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
@@ -36,50 +35,9 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: ghcr.io
-        env:
-          DOCKER_CONFIG: ${{ runner.temp }}/.docker
      - name: Build
        run: make build
-        env:
-          DOCKER_CONFIG: ${{ runner.temp }}/.docker
-      - name: Build Talos image
-        run: make -C packages/core/installer talos-nocloud
-      - name: Upload installer
-        uses: actions/upload-artifact@v4
-        with:
-          name: cozystack-installer
-          path: _out/assets/cozystack-installer.yaml
-      - name: Upload Talos image
-        uses: actions/upload-artifact@v4
-        with:
-          name: talos-image
-          path: _out/assets/nocloud-amd64.raw.xz
-  test:
-    name: Test
-    runs-on: [self-hosted]
-    needs: build
-    # Never run when the PR carries the "release" label.
-    if: |
-      !contains(github.event.pull_request.labels.*.name, 'release')
-    steps:
-      - name: Download installer
-        uses: actions/download-artifact@v4
-        with:
-          name: cozystack-installer
-          path: _out/assets/
-      - name: Download Talos image
-        uses: actions/download-artifact@v4
-        with:
-          name: talos-image
-          path: _out/assets/
      - name: Test
        run: make test
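The net effect in this file: the former `build` job (which also built the Talos image and uploaded artifacts) and the separate `test` job collapse into a single `e2e` job on the same self-hosted runner, so nothing passes through artifact upload/download between jobs. Roughly:

```yaml
jobs:
  e2e:
    name: Build and Test
    runs-on: [self-hosted]
    steps:
      - run: make build   # outputs stay on the runner's filesystem
      - run: make test    # consumed in place, no artifact round-trip
```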


@@ -3,10 +3,7 @@ name: Versioned Tag
 on:
   push:
     tags:
-      - 'v*.*.*' # vX.Y.Z
-      - 'v*.*.*-rc.*' # vX.Y.Z-rc.N
-      - 'v*.*.*-beta.*' # vX.Y.Z-beta.N
-      - 'v*.*.*-alpha.*' # vX.Y.Z-alpha.N
+      - 'v*.*.*' # vX.Y.Z or vX.Y.Z-rcN
 
 concurrency:
   group: tags-${{ github.workflow }}-${{ github.ref }}
@@ -20,7 +17,6 @@ jobs:
       contents: write
       packages: write
       pull-requests: write
-      actions: write
     steps:
       # Check if a non-draft release with this tag already exists
@@ -43,28 +39,28 @@ jobs:
         if: steps.check_release.outputs.skip == 'true'
         run: echo "Release already exists, skipping workflow."
-      # Parse tag meta-data (rc?, maintenance line, etc.)
+      # Parse tag metadata (rc?, maintenance line, etc.)
      - name: Parse tag
        if: steps.check_release.outputs.skip == 'false'
        id: tag
        uses: actions/github-script@v7
        with:
          script: |
-            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
-            const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/); // ['0.31.5', '-rc.1' | '-beta.1' | …]
+            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc1
+            const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
            if (!m) {
-              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
+              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
              return;
            }
-            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
+            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc1
            const isRc = Boolean(m[2]);
            const [maj, min] = m[1].split('.');
-            core.setOutput('tag', ref);         // v0.31.5-rc.1
-            core.setOutput('version', version); // 0.31.5-rc.1
-            core.setOutput('is_rc', isRc);      // true
+            core.setOutput('tag', ref);
+            core.setOutput('version', version);
+            core.setOutput('is_rc', isRc);
            core.setOutput('line', `${maj}.${min}`); // 0.31
      # Detect base branch (main or release-X.Y) the tag was pushed from
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
@@ -99,15 +95,11 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: ghcr.io
-        env:
-          DOCKER_CONFIG: ${{ runner.temp }}/.docker
      # Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build
-        env:
-          DOCKER_CONFIG: ${{ runner.temp }}/.docker
      # Commit built artifacts
      - name: Commit release artifacts
@@ -173,7 +165,7 @@ jobs:
             });
             console.log(`Draft release created for ${tag}`);
           } else {
-            console.log(`Re-using existing release ${tag}`);
+            console.log(`Reusing existing release ${tag}`);
          }
          core.setOutput('upload_url', rel.upload_url);
@@ -186,7 +178,7 @@ jobs:
       env:
         GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       # Create release-X.Y.Z branch and push (force-update)
       - name: Create release branch
         if: steps.check_release.outputs.skip == 'false'
         run: |

.gitignore (vendored)

@@ -1,7 +1,6 @@
 _out
 .git
 .idea
-.vscode
 
 # User-specific stuff
 .idea/**/workspace.xml
@@ -76,4 +75,4 @@ fabric.properties
 .idea/caches/build_file_checksums.ser
 .DS_Store
 **/.DS_Store


@@ -18,7 +18,6 @@ repos:
             (cd "$dir" && make generate)
           fi
         done
-        git diff --color=always | cat
       '
     language: script
     files: ^.*$


@@ -20,7 +20,6 @@ build: build-deps
 	make -C packages/system/kubeovn image
 	make -C packages/system/kubeovn-webhook image
 	make -C packages/system/dashboard image
-	make -C packages/system/metallb image
 	make -C packages/system/kamaji image
 	make -C packages/system/bucket image
 	make -C packages/core/testing image
@@ -43,11 +42,12 @@ manifests:
 	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
 assets:
-	make -C packages/core/installer assets
+	make -C packages/core/installer/ assets
 test:
 	make -C packages/core/testing apply
 	make -C packages/core/testing test
+	#make -C packages/core/testing test-applications
 generate:
 	hack/update-codegen.sh


@@ -39,8 +39,6 @@ import (
 	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
 	"github.com/cozystack/cozystack/internal/controller"
 	"github.com/cozystack/cozystack/internal/telemetry"
-	helmv2 "github.com/fluxcd/helm-controller/api/v2"
 	// +kubebuilder:scaffold:imports
 )
@@ -53,7 +51,6 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
-	utilruntime.Must(helmv2.AddToScheme(scheme))
 	// +kubebuilder:scaffold:scheme
 }
@@ -185,14 +182,6 @@ func main() {
 	if err = (&controller.WorkloadReconciler{
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
-		os.Exit(1)
-	}
-	if err = (&controller.TenantHelmReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "Workload")
 		os.Exit(1)


@@ -1,243 +0,0 @@
Cozystack v0.31.0 is a significant release that brings new features, key fixes, and updates to underlying components.
This version enhances GPU support, improves many components of Cozystack, and introduces a more robust release process to improve stability.
Below, we'll go over the highlights in each area for current users, developers, and our community.
## Major Features and Improvements
### GPU support for tenant Kubernetes clusters
Cozystack now integrates NVIDIA GPU Operator support for tenant Kubernetes clusters.
This enables platform users to run GPU-powered AI/ML applications in their own clusters.
To enable GPU Operator, set `addons.gpuOperator.enabled: true` in the cluster configuration.
(@kvaps in https://github.com/cozystack/cozystack/pull/834)
Check out Andrei Kvapil's CNCF webinar [showcasing the GPU support by running Stable Diffusion in Cozystack](https://www.youtube.com/watch?v=S__h_QaoYEk).
<!--
* [kubernetes] Introduce GPU support for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/834)
-->
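For reference, enabling the operator in a tenant cluster spec looks roughly like this; the field layout is taken from the e2e test fixture later in this diff, and the names are illustrative:

```yaml
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: demo              # illustrative
  namespace: tenant-demo  # illustrative
spec:
  addons:
    gpuOperator:
      enabled: true       # the one switch described above
      valuesOverride: {}
```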
### Cilium Improvements
Cozystack's Cilium integration received two significant enhancements.
First, Gateway API support in Cilium is now enabled, allowing advanced L4/L7 routing features via Kubernetes Gateway API.
We thank Zdenek Janda @zdenekjanda for contributing this feature in https://github.com/cozystack/cozystack/pull/924.
Second, Cozystack now permits custom user-provided parameters in the tenant cluster's Cilium configuration.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
<!--
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
-->
### Cross-Architecture Builds (ARM Support Beta)
Cozystack's build system was refactored to support multi-architecture binaries and container images.
This paves the way for running Cozystack on ARM64 servers.
Changes include Makefile improvements (https://github.com/cozystack/cozystack/pull/907)
and multi-arch Docker image builds (https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970).
We thank Nikita Bykov @nbykov0 for his ongoing work on ARM support!
<!--
* Introduce support for cross-architecture builds and Cozystack on ARM:
* [build] Refactor Makefiles introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
* [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
-->
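As a rough illustration of what a multi-arch image build involves (a generic `docker buildx` invocation, not Cozystack's actual Makefile targets):

```sh
# One-time: create a builder that can target multiple platforms.
docker buildx create --use
# Build and push a manifest covering both architectures (tag is illustrative).
docker buildx build --platform linux/amd64,linux/arm64 \
  -t ghcr.io/example/image:dev --push .
```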
### VerticalPodAutoscaler (VPA) Expansion
The VerticalPodAutoscaler is now enabled for more Cozystack components to automate resource tuning.
Specifically, VPA was added for tenant Kubernetes control planes (@klinch0 in https://github.com/cozystack/cozystack/pull/806),
the Cozystack Dashboard (https://github.com/cozystack/cozystack/pull/828),
and the Cozystack etcd-operator (https://github.com/cozystack/cozystack/pull/850).
All Cozystack components that have VPA enabled can automatically adjust their CPU and memory requests based on usage, improving platform and application stability.
<!--
* Add VerticalPodAutoscaler to a few more components:
* [kubernetes] Kubernetes clusters in user tenants. (@klinch0 in https://github.com/cozystack/cozystack/pull/806)
* [platform] Cozystack dashboard. (@klinch0 in https://github.com/cozystack/cozystack/pull/828)
* [platform] Cozystack etcd-operator (@klinch0 in https://github.com/cozystack/cozystack/pull/850)
-->
### Tenant HelmRelease Reconcile Controller
A new controller was introduced to monitor and synchronize HelmRelease resources across tenants.
This controller propagates configuration changes to tenant workloads and ensures that any HelmRelease defined in a tenant
stays in sync with platform updates.
It improves the reliability of deploying managed applications in Cozystack.
(@klinch0 in https://github.com/cozystack/cozystack/pull/870)
<!--
* [platform] Introduce a new controller to synchronize tenant HelmReleases and propagate configuration changes. (@klinch0 in https://github.com/cozystack/cozystack/pull/870)
-->
### Virtual Machine Improvements
**Configurable KubeVirt CPU Overcommit**: The CPU allocation ratio in KubeVirt (how virtual CPUs are overcommitted relative to physical) is now configurable
via the `cpu-allocation-ratio` value in the Cozystack configmap.
This means Cozystack administrators can now tune CPU overcommitment for VMs to balance performance vs. density.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
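A hedged example of tuning it, assuming the same `cozystack` ConfigMap in `cozy-system` that holds other platform settings in this diff's e2e tests; the ratio value `10` is purely illustrative:

```sh
kubectl patch configmap/cozystack -n cozy-system --type merge \
  -p '{"data":{"cpu-allocation-ratio":"10"}}'
```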
**KubeVirt VM Export**: Cozystack now allows exporting KubeVirt virtual machines.
This feature, enabled via KubeVirt's `VirtualMachineExport` capability, lets users snapshot or back up VM images.
(@kvaps in https://github.com/cozystack/cozystack/pull/808)
**Support for various storage classes in Virtual Machines**: The `virtual-machine` application (since version 0.9.2) lets you pick any StorageClass for a VM's
system disk instead of relying on a hard-coded PVC.
Refer to values `systemDisk.storage` and `systemDisk.storageClass` in the [application's configs](https://cozystack.io/docs/reference/applications/virtual-machine/#common-parameters).
(@kvaps in https://github.com/cozystack/cozystack/pull/974)
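An illustrative values fragment using those keys (the size is arbitrary; `replicated` matches a storage class defined in the e2e tests below):

```yaml
systemDisk:
  storage: 20Gi            # illustrative disk size
  storageClass: replicated # any StorageClass available in the cluster
```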
<!--
* [kubevirt] Enable exporting VMs. (@kvaps in https://github.com/cozystack/cozystack/pull/808)
* [kubevirt] Make KubeVirt's CPU allocation ratio configurable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
* [virtual-machine] Add support for various storages. (@kvaps in https://github.com/cozystack/cozystack/pull/974)
-->
### Other Features and Improvements
* [platform] Introduce options `expose-services`, `expose-ingress`, and `expose-external-ips` to the ingress service. (@kvaps in https://github.com/cozystack/cozystack/pull/929)
* [cozystack-controller] Record the IP address pool and storage class in Workload objects. (@lllamnyp in https://github.com/cozystack/cozystack/pull/831)
* [apps] Remove user-facing config of limits and requests. (@lllamnyp in https://github.com/cozystack/cozystack/pull/935)
## New Release Lifecycle
The Cozystack release lifecycle is changing to provide stable and predictable updates to customers running Cozystack in mission-critical environments.
* **Gradual Release with Alpha, Beta, and Release Candidates**: Cozystack will now publish pre-release versions (alpha, beta, release candidates) before a stable release.
For v0.31.0, the team published three release candidates before the stable release.
This allows more testing and feedback before marking a release as stable.
* **Prolonged Release Support with Patch Versions**: After the initial `vX.Y.0` release, a long-lived branch `release-X.Y` will be created to backport fixes.
For example, with 0.31.0's release, a `release-0.31` branch will track patch fixes (`0.31.x`).
This strategy lets Cozystack users receive timely patch releases and updates with minimal risks.
To implement these new changes, we have rebuilt our CI/CD workflows and introduced automation, enabling automatic backports.
You can read more about how it's implemented in the Development section below.
For more information, read the [Cozystack Release Workflow](https://github.com/cozystack/cozystack/blob/main/docs/release.md) documentation.
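In git terms, the lifecycle sketched above looks roughly like this (version numbers illustrative):

```sh
git tag v0.42.0-rc.1 && git push origin v0.42.0-rc.1  # release candidate, cut from main
git tag v0.42.0      && git push origin v0.42.0       # stable release; CI then creates release-0.42
git tag v0.42.1      && git push origin v0.42.1       # patch release, tagged on release-0.42
```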
## Fixes
* [virtual-machine] Add GPU names to the virtual machine specifications. (@kvaps in https://github.com/cozystack/cozystack/pull/862)
* [virtual-machine] Count Workload resources for pods by requests, not limits. Other improvements to VM resource tracking. (@lllamnyp in https://github.com/cozystack/cozystack/pull/904)
* [virtual-machine] Set PortList method by default. (@kvaps in https://github.com/cozystack/cozystack/pull/996)
* [virtual-machine] Specify ports even for wholeIP mode. (@kvaps in https://github.com/cozystack/cozystack/pull/1000)
* [platform] Fix installing HelmReleases on initial setup. (@kvaps in https://github.com/cozystack/cozystack/pull/833)
* [platform] Migration scripts update Kubernetes ConfigMap with the current stack version for improved version tracking. (@klinch0 in https://github.com/cozystack/cozystack/pull/840)
* [platform] Reduce requested CPU and RAM for the `kamaji` provider. (@klinch0 in https://github.com/cozystack/cozystack/pull/825)
* [platform] Improve the reconciliation loop for the Cozystack system HelmReleases logic. (@klinch0 in https://github.com/cozystack/cozystack/pull/809 and https://github.com/cozystack/cozystack/pull/810, @kvaps in https://github.com/cozystack/cozystack/pull/811)
* [platform] Remove extra dependencies for the Piraeus operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/856)
* [platform] Refactor dashboard values. (@kvaps in https://github.com/cozystack/cozystack/pull/928, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/952)
* [platform] Make FluxCD artifact disabled by default. (@klinch0 in https://github.com/cozystack/cozystack/pull/964)
* [kubernetes] Update garbage collection of HelmReleases in tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/835)
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)
* [kubernetes] Fix `ubuntu-container-disk` tag. (@kvaps in https://github.com/cozystack/cozystack/pull/887)
* [kubernetes] Refactor Helm manifests for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/866)
* [kubernetes] Fix Ingress-NGINX depends on Cert-Manager. (@kvaps in https://github.com/cozystack/cozystack/pull/976)
* [kubernetes, apps] Enable `topologySpreadConstraints` for tenant Kubernetes clusters and fix it for managed PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/995)
* [tenant] Fix an issue with accessing external IPs of a cluster from the cluster itself. (@kvaps in https://github.com/cozystack/cozystack/pull/854)
* [cluster-api] Remove the no longer necessary workaround for Kamaji. (@kvaps in https://github.com/cozystack/cozystack/pull/867, patched in https://github.com/cozystack/cozystack/pull/956)
* [monitoring] Remove legacy label "POD" from the exclude filter in metrics. (@xy2 in https://github.com/cozystack/cozystack/pull/826)
* [monitoring] Refactor management etcd monitoring config. Introduce a migration script for updating monitoring resources (`kube-rbac-proxy` daemonset). (@lllamnyp in https://github.com/cozystack/cozystack/pull/799 and https://github.com/cozystack/cozystack/pull/830)
* [monitoring] Fix VerticalPodAutoscaler resource allocation for VMagent. (@klinch0 in https://github.com/cozystack/cozystack/pull/820)
* [postgres] Remove duplicated `template` entry from backup manifest. (@etoshutka in https://github.com/cozystack/cozystack/pull/872)
* [kube-ovn] Fix versions mapping in Makefile. (@kvaps in https://github.com/cozystack/cozystack/pull/883)
* [dx] Automatically detect version for migrations in the installer.sh. (@kvaps in https://github.com/cozystack/cozystack/pull/837)
* [dx] Remove version_map and building for library charts. (@kvaps in https://github.com/cozystack/cozystack/pull/998)
* [docs] Review the tenant Kubernetes cluster docs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/969)
* [docs] Explain that tenants cannot have dashes in their names. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/980)
## Dependencies
* MetalLB images are now built in-tree based on version 0.14.9 with additional critical patches. (@lllamnyp in https://github.com/cozystack/cozystack/pull/945)
* Update Kubernetes to v1.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/949)
* Update Talos Linux to v1.10.1. (@kvaps in https://github.com/cozystack/cozystack/pull/931)
* Update Cilium to v1.17.3. (@kvaps in https://github.com/cozystack/cozystack/pull/848)
* Update LINSTOR to v1.31.0. (@kvaps in https://github.com/cozystack/cozystack/pull/846)
* Update Kube-OVN to v1.13.11. (@kvaps in https://github.com/cozystack/cozystack/pull/847, @lllamnyp in https://github.com/cozystack/cozystack/pull/922)
* Update tenant Kubernetes to v1.32. (@kvaps in https://github.com/cozystack/cozystack/pull/871)
* Update flux-operator to 0.20.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/880 and https://github.com/cozystack/cozystack/pull/934)
* Update multiple Cluster API components. (@kvaps in https://github.com/cozystack/cozystack/pull/867 and https://github.com/cozystack/cozystack/pull/947)
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953, fixed by @nbykov0 in https://github.com/cozystack/cozystack/pull/983)
* Update cert-manager to v1.17.2. (@kvaps in https://github.com/cozystack/cozystack/pull/975)
## Documentation
* [Installing Talos in Air-Gapped Environment](https://cozystack.io/docs/operations/talos/configuration/air-gapped/):
new guide for configuring and bootstrapping Talos Linux clusters in air-gapped environments.
(@klinch0 in https://github.com/cozystack/website/pull/203)
* [Cozystack Bundles](https://cozystack.io/docs/guides/bundles/): new page in the learning section explaining how Cozystack bundles work and how to choose a bundle.
(@NickVolynkin in https://github.com/cozystack/website/pull/188, https://github.com/cozystack/website/pull/189, and others;
updated by @kvaps in https://github.com/cozystack/website/pull/192 and https://github.com/cozystack/website/pull/193)
* [Managed Application Reference](https://cozystack.io/docs/reference/applications/): A set of new pages in the docs, mirroring application docs from the Cozystack dashboard.
(@NickVolynkin in https://github.com/cozystack/website/pull/198, https://github.com/cozystack/website/pull/202, and https://github.com/cozystack/website/pull/204)
* **LINSTOR Networking**: Guides on [configuring dedicated network for LINSTOR](https://cozystack.io/docs/operations/storage/dedicated-network/)
and [configuring network for distributed storage in multi-datacenter setup](https://cozystack.io/docs/operations/stretched/linstor-dedicated-network/).
(@xy2, edited by @NickVolynkin in https://github.com/cozystack/website/pull/171, https://github.com/cozystack/website/pull/182, and https://github.com/cozystack/website/pull/184)
### Fixes
* Correct error in the doc for the command to edit the configmap. (@lb0o in https://github.com/cozystack/website/pull/207)
* Fix group name in OIDC docs (@kingdonb in https://github.com/cozystack/website/pull/179)
* A bit more explanation of Docker buildx builders. (@nbykov0 in https://github.com/cozystack/website/pull/187)
## Development, Testing, and CI/CD
### Testing
Improvements:
* Introduce `cozytest` — a new [BATS-based](https://github.com/bats-core/bats-core) testing framework. (@kvaps in https://github.com/cozystack/cozystack/pull/982)
Fixes:
* Fix `device_ownership_from_security_context` CRI. (@dtrdnk in https://github.com/cozystack/cozystack/pull/896)
* Increase timeout durations for `capi` and `keycloak` to improve reliability during e2e-tests. (@kvaps in https://github.com/cozystack/cozystack/pull/858)
* Return `genisoimage` to the e2e-test Dockerfile (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/962)
### CI/CD Changes
Improvements:
* Use release branches `release-X.Y` for gathering and releasing fixes after initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
* Automatically create release branches after initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
* Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
* Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
* Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
* Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)
Fixes:
* Improve the check for `versions_map` running on pull requests. (@kvaps and @klinch0 in https://github.com/cozystack/cozystack/pull/836, https://github.com/cozystack/cozystack/pull/842, and https://github.com/cozystack/cozystack/pull/845)
* If the release step was skipped on a tag, skip tests as well. (@kvaps in https://github.com/cozystack/cozystack/pull/822)
* Allow CI to cancel the previous job if a new one is scheduled. (@kvaps in https://github.com/cozystack/cozystack/pull/873)
* Use the correct version name when uploading build assets to the release page. (@kvaps in https://github.com/cozystack/cozystack/pull/876)
* Stop using `ok-to-test` label to trigger CI in pull requests. (@kvaps in https://github.com/cozystack/cozystack/pull/875)
* Do not run tests in the release building pipeline. (@kvaps in https://github.com/cozystack/cozystack/pull/882)
* Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* Force-update release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* Show detailed errors in the `pull-request-release` workflow. (@lllamnyp in https://github.com/cozystack/cozystack/pull/992)
## Community and Maintenance
### Repository Maintenance
Added @klinch0 to CODEOWNERS. (@kvaps in https://github.com/cozystack/cozystack/pull/838)
### New Contributors
* @etoshutka made their first contribution in https://github.com/cozystack/cozystack/pull/872
* @dtrdnk made their first contribution in https://github.com/cozystack/cozystack/pull/896
* @zdenekjanda made their first contribution in https://github.com/cozystack/cozystack/pull/924
* @gwynbleidd2106 made their first contribution in https://github.com/cozystack/cozystack/pull/962
## Full Changelog
See https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0


@@ -1,37 +1,10 @@
 # Release Workflow
-This document describes Cozystack's release process.
+This section explains how Cozystack builds and releases are made.
-## Introduction
-Cozystack uses a staged release process to ensure stability and flexibility during development.
-There are three types of releases:
-- **Release Candidates (RC)**: Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
-- **Regular Releases**: Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
-- **Patch Releases**: Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.
-Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.
-## Release Candidates
-Release candidates are Cozystack versions that introduce new features and are published before a stable release.
-Their purpose is to help validate stability before finalizing a new feature release.
-They allow for final rounds of testing and bug fixes without freezing development.
-Release candidates are given numbers `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
-They are created directly in the `main` branch.
-An RC is typically tagged when all major features for the upcoming release have been merged into main and the release enters its testing phase.
-However, new features and changes can still be added before the regular release `vX.Y.0`.
-Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
-After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in main.
-This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.
 ## Regular Releases
-When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
+When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
 In this explanation, we'll use version `v0.42.0` as an example:
 ```mermaid


@@ -1,117 +0,0 @@
#!/bin/sh
###############################################################################
# cozytest.sh - Bats-compatible test runner with live trace and enhanced #
# output, written in pure shell #
###############################################################################
set -eu
TEST_FILE=${1:?Usage: ./cozytest.sh <file.bats> [pattern]}
PATTERN=${2:-*}
LINE='----------------------------------------------------------------'
cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
BEGIN=$(date +%s)
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }
###############################################################################
# run_one <fn> <title> #
###############################################################################
run_one() {
fn=$1 title=$2
tmp=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
log="$tmp/log"
echo "╭ » Run test: $title"
START=$(date +%s)
skip_next="+ $fn" # skip the first trace line, which just repeats the function name
{
(
PS4='+ ' # prefix for set -x
set -eu -x # strict + trace
"$fn"
)
printf '__RC__%s\n' "$?"
} 2>&1 | tee "$log" | while IFS= read -r line; do
case "$line" in
'__RC__'*) : ;;
'+ '*) cmd=${line#'+ '}
[ "$cmd" = "${skip_next#+ }" ] && continue
case "$cmd" in
'set -e'|'set -x'|'set -u'|'return 0') continue ;;
esac
out=$cmd ;;
*) out=$line ;;
esac
now=$(( $(date +%s) - START ))
[ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
done
rc=$(awk '/^__RC__/ {print substr($0,7)}' "$log" | tail -n1)
[ -z "$rc" ] && rc=1
now=$(( $(date +%s) - START ))
if [ "$rc" -eq 0 ]; then
printf '╰[%02d:%02d] ✅ Test OK: %s\n' $((now/60)) $((now%60)) "$title"
else
printf '╰[%02d:%02d] ❌ Test failed: %s (exit %s)\n' \
$((now/60)) $((now%60)) "$title" "$rc"
echo "----- captured output -----------------------------------------"
grep -v '^__RC__' "$log"
echo "$LINE"
exit "$rc"
fi
rm -rf "$tmp"
}
###############################################################################
# convert .bats -> shell-functions #
###############################################################################
TMP_SH=$(mktemp) || { echo "Failed to create temp file" >&2; exit 1; }
trap 'rm -f "$TMP_SH"' EXIT
awk '
/^@test[[:space:]]+"/ {
line = substr($0, index($0, "\"") + 1)
title = substr(line, 1, index(line, "\"") - 1)
fname = "test_"
for (i = 1; i <= length(title); i++) {
c = substr(title, i, 1)
fname = fname (c ~ /[A-Za-z0-9]/ ? c : "_")
}
printf("### %s\n", title)
printf("%s() {\n", fname)
print " set -e" # ошибка → падение теста
next
}
/^}$/ {
print " return 0" # если автор не сделал exit 1 — тест ОК
print "}"
next
}
{ print }
' "$TEST_FILE" > "$TMP_SH"
[ -f "$TMP_SH" ] || { echo "Failed to generate test functions" >&2; exit 1; }
# shellcheck disable=SC1090
. "$TMP_SH"
###############################################################################
# run selected tests #
###############################################################################
awk -v pat="$PATTERN" '
/^### / {
title = substr($0, 5)
name = "test_"
for (i = 1; i <= length(title); i++) {
c = substr(title, i, 1)
name = name (c ~ /[A-Za-z0-9]/ ? c : "_")
}
if (pat == "*" || index(title, pat) > 0)
printf("%s %s\n", name, title)
}
' "$TMP_SH" | while IFS=' ' read -r fn title; do
run_one "$fn" "$title"
done
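Per the usage string at the top of the script, it takes a Bats file and an optional title substring (file names illustrative):

```sh
./cozytest.sh cozystack.bats            # run every @test in the file
./cozytest.sh cozystack.bats "Install"  # only tests whose titles contain "Install"
```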


@@ -1,94 +0,0 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------
@test "Create tenant with isolated mode enabled" {
kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}
@test "Create a tenant Kubernetes control plane" {
kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: false
      valuesOverride: {}
    cilium:
      valuesOverride: {}
    fluxcd:
      enabled: false
      valuesOverride: {}
    gatewayAPI:
      enabled: false
    gpuOperator:
      enabled: false
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts: []
      valuesOverride: {}
    monitoringAgents:
      enabled: false
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
  controlPlane:
    apiServer:
      resources: {}
      resourcesPreset: small
    controllerManager:
      resources: {}
      resourcesPreset: micro
    konnectivity:
      server:
        resources: {}
        resourcesPreset: micro
    replicas: 2
    scheduler:
      resources: {}
      resourcesPreset: micro
  host: ""
  nodeGroups:
    md0:
      ephemeralStorage: 20Gi
      gpus: []
      instanceType: u1.medium
      maxReplicas: 10
      minReplicas: 0
      resources:
        cpu: ""
        memory: ""
      roles:
        - ingress-nginx
      storageClass: replicated
EOF
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=5m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
}


@@ -1,391 +0,0 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------
@test "Required installer assets exist" {
if [ ! -f _out/assets/cozystack-installer.yaml ]; then
echo "Missing: _out/assets/cozystack-installer.yaml" >&2
exit 1
fi
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
exit 1
fi
}
@test "IPv4 forwarding is enabled" {
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is disabled!" >&2
echo >&2
echo "Enable it with:" >&2
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
exit 1
fi
}
@test "Clean previous VMs" {
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
rm -rf srv1 srv2 srv3
}
@test "Prepare networking and masquerading" {
ip link del cozy-br0 2>/dev/null || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip address add 192.168.123.1/24 dev cozy-br0
# Masquerading rule idempotent (delete first, then add)
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}
@test "Prepare cloudinit drive for VMs" {
mkdir -p srv1 srv2 srv3
# Generate cloud-init seed ISOs
for i in 1 2 3; do
echo "hostname: srv${i}" > "srv${i}/meta-data"
cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF
cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1${i}/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOF
( cd "srv${i}" && genisoimage \
-output seed.img \
-volid cidata -rational-rock -joliet \
user-data meta-data network-config )
done
}
@test "Use Talos NoCloud image from assets" {
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
exit 1
fi
rm -f nocloud-amd64.raw
cp _out/assets/nocloud-amd64.raw.xz .
xz --decompress nocloud-amd64.raw.xz
}
@test "Prepare VM disks" {
for i in 1 2 3; do
cp nocloud-amd64.raw srv${i}/system.img
qemu-img resize srv${i}/system.img 50G
qemu-img create srv${i}/data.img 100G
done
}
@test "Create tap devices" {
for i in 1 2 3; do
ip link del cozy-srv${i} 2>/dev/null || true
ip tuntap add dev cozy-srv${i} mode tap
ip link set cozy-srv${i} up
ip link set cozy-srv${i} master cozy-br0
done
}
@test "Boot QEMU VMs" {
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
-drive file=srv${i}/system.img,if=virtio,format=raw \
-drive file=srv${i}/seed.img,if=virtio,format=raw \
-drive file=srv${i}/data.img,if=virtio,format=raw \
-display none -daemonize -pidfile srv${i}/qemu.pid
done
# Give qemu a few seconds to start up networking
sleep 5
}
@test "Wait until Talos API port 50000 is reachable on all machines" {
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Generate Talos cluster configuration" {
# Cluster-wide patches
cat > patch.yaml <<'EOF'
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create
cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOF
# Control-plane-only patches
cat > patch-controlplane.yaml <<'EOF'
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOF
# Generate secrets once
if [ ! -f secrets.yaml ]; then
talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}
@test "Apply Talos configuration to the node" {
# Apply the configuration to all three nodes
for node in 11 12 13; do
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
done
# Wait for Talos services to come up again
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Bootstrap Talos cluster" {
# Bootstrap etcd on the first node
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait until etcd is healthy
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
# Retrieve kubeconfig
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
# Wait until all three nodes register in Kubernetes
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}
@test "Install Cozystack" {
# Create namespace & configmap required by installer
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
kubectl create configmap cozystack -n cozy-system \
--from-literal=bundle-name=paas-full \
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
--from-literal=ipv4-pod-gateway=10.244.0.1 \
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
--from-literal=root-host=example.org \
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
--dry-run=client -o yaml | kubectl apply -f -
# Apply installer manifests from file
kubectl apply -f _out/assets/cozystack-installer.yaml
# Wait for the installer deployment to become available
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
# Wait until HelmReleases appear & reconcile them
timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
# Fail the test if any HelmRelease is not Ready
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
kubectl get hr -A
fail "Some HelmReleases failed to reconcile"
fi
}
@test "Wait for ClusterAPI provider deployments" {
# Wait for ClusterAPI provider deployments
timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
}
@test "Wait for LINSTOR and configure storage" {
# Linstor controller and nodes
kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
for node in srv1 srv2 srv3; do
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
done
# Storage classes
kubectl apply -f - <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOF
}
@test "Wait for MetalLB and configure address pool" {
# MetalLB address pool
kubectl apply -f - <<'EOF'
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools: [cozystack]
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses: [192.168.123.200-192.168.123.250]
  autoAssign: true
  avoidBuggyIPs: false
EOF
}
@test "Check Cozystack API service" {
kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}
@test "Configure Tenant and wait for applications" {
# Patch root tenant and wait for its releases
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'
timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready
if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
fi
# Expose Cozystack services through ingress
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
# NGINX ingress controller
timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available
# etcd statefulset
kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m
# VictoriaMetrics components
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m
# Grafana
kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m
# Verify Grafana via ingress
ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
exit 1
fi
}
@test "Keycloak OIDC stack is healthy" {
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}

hack/e2e.application.sh (new executable file)

@@ -0,0 +1,165 @@
#!/bin/bash
RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'
ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"
values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"
function delete_hr() {
local release_name="$1"
local namespace="$2"
if [[ -z "$release_name" ]]; then
echo -e "${RED}Error: Release name is required.${RESET}"
exit 1
fi
if [[ -z "$namespace" ]]; then
echo -e "${RED}Error: Namespace name is required.${RESET}"
exit 1
fi
if [[ "$release_name" == "tenant-e2e" ]]; then
echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
return 0
fi
kubectl delete helmrelease $release_name -n $namespace
}
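# install_helmrelease <release> <namespace> <chart> <repo> <repo-namespace> [values-file]
# Renders a HelmRelease manifest (embedding the values file, if given) and applies it.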
function install_helmrelease() {
local release_name="$1"
local namespace="$2"
local chart_path="$3"
local repo_name="$4"
local repo_ns="$5"
local values_file="$6"
if [[ -z "$release_name" ]]; then
echo -e "${RED}Error: Release name is required.${RESET}"
exit 1
fi
if [[ -z "$namespace" ]]; then
echo -e "${RED}Error: Namespace name is required.${RESET}"
exit 1
fi
if [[ -z "$chart_path" ]]; then
echo -e "${RED}Error: Chart path name is required.${RESET}"
exit 1
fi
if [[ -n "$values_file" && -f "$values_file" ]]; then
local values_section
values_section=$(echo " values:" && sed 's/^/ /' "$values_file")
fi
local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
{
echo "apiVersion: helm.toolkit.fluxcd.io/v2"
echo "kind: HelmRelease"
echo "metadata:"
echo " labels:"
echo " cozystack.io/ui: \"true\""
echo " name: \"$release_name\""
echo " namespace: \"$namespace\""
echo "spec:"
echo " chart:"
echo " spec:"
echo " chart: \"$chart_path\""
echo " reconcileStrategy: Revision"
echo " sourceRef:"
echo " kind: HelmRepository"
echo " name: \"$repo_name\""
echo " namespace: \"$repo_ns\""
echo " version: '*'"
echo " interval: 1m0s"
echo " timeout: 5m0s"
[[ -n "$values_section" ]] && echo "$values_section"
} > "$helmrelease_file"
kubectl apply -f "$helmrelease_file"
rm -f "$helmrelease_file"
}
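# install_tenant <release> <namespace>
# Installs the tenant chart from the cozystack-apps repository with the shared tenant values.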
function install_tenant() {
local release_name="$1"
local namespace="$2"
local values_file="${values_base_path}tenant/values.yaml"
local repo_name="cozystack-apps"
local repo_ns="cozy-public"
install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}
function make_extra_checks(){
local checks_file="$1"
echo "after exec make $checks_file"
if [[ -n "$checks_file" && -f "$checks_file" ]]; then
echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"
fi
}
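# check_helmrelease_status <release> <namespace> <checks-file>
# Polls the HelmRelease every 5 seconds for up to 300 seconds; on success it
# triggers the extra-checks hook and deletes the release, on InstallFailed it
# exits immediately.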
function check_helmrelease_status() {
local release_name="$1"
local namespace="$2"
local checks_file="$3"
local timeout=300 # Timeout in seconds
local interval=5 # Interval between checks in seconds
local elapsed=0
while [[ $elapsed -lt $timeout ]]; do
local status_output
status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')
if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
make_extra_checks "$checks_file"
delete_hr $release_name $namespace
return 0
elif [[ "$status_output" == "InstallFailed" ]]; then
echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
exit 1
else
echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
fi
sleep "$interval"
elapsed=$((elapsed + interval))
done
echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
exit 1
}
chart_name="$1"
if [ -z "$chart_name" ]; then
echo -e "${RED}No chart name provided. Exiting...${RESET}"
exit 1
fi
checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"
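# Main flow: bring up the shared e2e tenant first, then install the chart
# under test into it and wait for each release to become ready.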
install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"
echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"
install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file
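For reference, a plausible invocation (assuming the script runs from the repository root with a kubeconfig for the management cluster already exported) would be:

    hack/e2e.application.sh nats

which creates the tenant-e2e tenant, deploys the nats-e2e release from hack/testdata/nats/values.yaml, and waits for it to report InstallSucceeded or UpgradeSucceeded.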

hack/e2e.sh Executable file

@@ -0,0 +1,361 @@
#!/bin/bash
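# Spins up a disposable three-node Cozystack cluster for end-to-end testing:
# creates a local bridge (cozy-br0, 192.168.123.0/24), boots three Talos VMs
# under QEMU/KVM, bootstraps Kubernetes, installs Cozystack from
# $COZYSTACK_INSTALLER_YAML, and then verifies storage, networking, tenant,
# and OIDC components. Expects to run as root on a host with KVM available.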
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
echo 'please set it with following command:' >&2
echo >&2
echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
echo >&2
exit 1
fi
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is not enabled!" >&2
echo 'please enable forwarding with the following command:' >&2
echo >&2
echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
echo >&2
exit 1
fi
set -x
set -e
kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true
ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0
# Enable masquerading
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3
# Prepare cloud-init
for i in 1 2 3; do
echo "hostname: srv$i" > "srv$i/meta-data"
echo '#cloud-config' > "srv$i/user-data"
cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
eth0:
dhcp4: false
addresses:
- "192.168.123.1$i/26"
gateway4: "192.168.123.1"
nameservers:
search: [cluster.local]
addresses: [8.8.8.8]
EOT
( cd srv$i && genisoimage \
-output seed.img \
-volid cidata -rational-rock -joliet \
user-data meta-data network-config
)
done
# Prepare system drives
if [ ! -f nocloud-amd64.raw ]; then
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
rm -f nocloud-amd64.raw
xz --decompress nocloud-amd64.raw.xz
fi
for i in 1 2 3; do
cp nocloud-amd64.raw srv$i/system.img
qemu-img resize srv$i/system.img 20G
done
# Prepare data drives
for i in 1 2 3; do
qemu-img create srv$i/data.img 100G
done
# Prepare networking
for i in 1 2 3; do
ip link del cozy-srv$i || true
ip tuntap add dev cozy-srv$i mode tap
ip link set cozy-srv$i up
ip link set cozy-srv$i master cozy-br0
done
# Start VMs
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
-drive file=srv$i/system.img,if=virtio,format=raw \
-drive file=srv$i/seed.img,if=virtio,format=raw \
-drive file=srv$i/data.img,if=virtio,format=raw \
-display none -daemonize -pidfile srv$i/qemu.pid
done
sleep 5
# Wait for the VMs to start up
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
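# Shared Talos machine-config patch: restrict node IPs to the test subnet,
# load the kernel modules needed for storage (DRBD, ZFS) and networking
# (Open vSwitch), mirror docker.io, enable OIDC on kube-apiserver, and
# disable the default CNI since Cozystack ships its own.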
cat > patch.yaml <<\EOT
machine:
kubelet:
nodeIP:
validSubnets:
- 192.168.123.0/24
extraConfig:
maxPods: 512
kernel:
modules:
- name: openvswitch
- name: drbd
parameters:
- usermode_helper=disabled
- name: zfs
- name: spl
registries:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
files:
- content: |
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = true
path: /etc/cri/conf.d/20-customization.part
op: create
cluster:
apiServer:
extraArgs:
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
oidc-client-id: "kubernetes"
oidc-username-claim: "preferred_username"
oidc-groups-claim: "groups"
network:
cni:
name: none
dnsDomain: cozy.local
podSubnets:
- 10.244.0.0/16
serviceSubnets:
- 10.96.0.0/16
EOT
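# Control-plane-specific patch: shared VIP 192.168.123.10 on eth0, scheduling
# allowed on control-plane nodes, kube-proxy and DNS-based discovery disabled,
# and etcd advertisement restricted to the test subnet.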
cat > patch-controlplane.yaml <<\EOT
machine:
nodeLabels:
node.kubernetes.io/exclude-from-external-load-balancers:
$patch: delete
network:
interfaces:
- interface: eth0
vip:
ip: 192.168.123.10
cluster:
allowSchedulingOnControlPlanes: true
controllerManager:
extraArgs:
bind-address: 0.0.0.0
scheduler:
extraArgs:
bind-address: 0.0.0.0
apiServer:
certSANs:
- 127.0.0.1
proxy:
disabled: true
discovery:
enabled: false
etcd:
advertisedSubnets:
- 192.168.123.0/24
EOT
# Gen configuration
if [ ! -f secrets.yaml ]; then
talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig
# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
# Wait for the VMs to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
# Bootstrap
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait for etcd
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig
# Wait for Kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
kubectl create ns cozy-system -o yaml | kubectl apply -f -
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
name: cozystack
namespace: cozy-system
data:
bundle-name: "paas-full"
ipv4-pod-cidr: "10.244.0.0/16"
ipv4-pod-gateway: "10.244.0.1"
ipv4-svc-cidr: "10.96.0.0/16"
ipv4-join-cidr: "100.64.0.0/16"
root-host: example.org
api-server-endpoint: https://192.168.123.10:6443
EOT
# Install Cozystack
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
# wait for cozystack pod to start
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
# wait for HelmReleases to appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
sleep 5
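# Generate one backgrounded "kubectl wait" per HelmRelease and run them all in parallel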
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
# Wait for Cluster-API providers
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
# Wait for all LINSTOR nodes to become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
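# Create a ZFS-backed LINSTOR storage pool named "data" on each node's spare disk (/dev/vdc)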
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
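# Define two storage classes: "local" (default, node-local volumes) and
# "replicated" (3-way DRBD replication with quorum-related DRBD options)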
kubectl create -f- <<EOT
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/layerList: "storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: replicated
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/autoPlace: "3"
linstor.csi.linbit.com/layerList: "drbd storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
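# Advertise a MetalLB address pool so LoadBalancer services get IPs from the test subnet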
kubectl create -f- <<EOT
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: cozystack
namespace: cozy-metallb
spec:
ipAddressPools:
- cozystack
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: cozystack
namespace: cozy-metallb
spec:
addresses:
- 192.168.123.200-192.168.123.250
autoAssign: true
avoidBuggyIPs: false
EOT
# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
"host": "example.org",
"ingress": true,
"monitoring": true,
"etcd": true,
"isolated": true
}}'
# Wait for HelmRelease be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
# Wait for HelmReleases be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi
kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
"dashboard": true
}}'
# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller
# Wait for etcd
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd
# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
# Wait for grafana
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment
# Get IP of nginx-ingress
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
# Test OIDC
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
"oidc-enabled": "true"
}}'
timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator


@@ -16,15 +16,13 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
   exit 0
 fi
-miss_map=$(mktemp)
-trap 'rm -f "$miss_map"' EXIT
-echo -n "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file" > $miss_map
+miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
 # search accross all tags sorted by version
 search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
 resolved_miss_map=$(
-  while read -r chart version commit; do
+  echo "$miss_map" | while read -r chart version commit; do
     # if version is found in HEAD, it's HEAD
     if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
       echo "$chart $version HEAD"
@@ -58,7 +56,7 @@ resolved_miss_map=$(
     fi
     echo "$chart $version $found_tag"
-  done < $miss_map
+  done
 )
 printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"


@@ -1,65 +0,0 @@
#!/bin/sh
set -e
usage() {
printf "%s\n" "Usage:" >&2 ;
printf -- "%s\n" '---' >&2 ;
printf "%s %s\n" "$0" "INPUT_DIR OUTPUT_DIR TMP_DIR [DEPENDENCY_DIR]" >&2 ;
printf -- "%s\n" '---' >&2 ;
printf "%s\n" "Takes a helm repository from INPUT_DIR, with an optional library repository in" >&2 ;
printf "%s\n" "DEPENDENCY_DIR, prepares a view of the git archive at select points in history" >&2 ;
printf "%s\n" "in TMP_DIR and packages helm charts, outputting the tarballs to OUTPUT_DIR" >&2 ;
}
if [ "x$(basename $PWD)" != "xpackages" ]
then
echo "Error: This script must run from the ./packages/ directory" >&2
echo >&2
usage
exit 1
fi
if [ "x$#" != "x3" ] && [ "x$#" != "x4" ]
then
echo "Error: This script takes 3 or 4 arguments" >&2
echo "Got $# arguments:" "$@" >&2
echo >&2
usage
exit 1
fi
input_dir=$1
output_dir=$2
tmp_dir=$3
if [ "x$#" = "x4" ]
then
dependency_dir=$4
fi
rm -rf "${output_dir:?}"
mkdir -p "${output_dir}"
while read package _ commit
do
# this lets devs build the packages from a dirty repo for quick local testing
if [ "x$commit" = "xHEAD" ]
then
helm package "${input_dir}/${package}" -d "${output_dir}"
continue
fi
git archive --format tar "${commit}" "${input_dir}/${package}" | tar -xf- -C "${tmp_dir}/"
# the library chart is not present in older commits and git archive doesn't fail gracefully if the path is not found
if [ "x${dependency_dir}" != "x" ] && git ls-tree --name-only "${commit}" "${dependency_dir}" | grep -qx "${dependency_dir}"
then
git archive --format tar "${commit}" "${dependency_dir}" | tar -xf- -C "${tmp_dir}/"
fi
helm package "${tmp_dir}/${input_dir}/${package}" -d "${output_dir}"
rm -rf "${tmp_dir:?}/${input_dir:?}/${package:?}"
if [ "x${dependency_dir}" != "x" ]
then
rm -rf "${tmp_dir:?}/${dependency_dir:?}"
fi
done < "${input_dir}/versions_map"
helm repo index "${output_dir}"

hack/testdata/http-cache/check.sh vendored Normal file

@@ -0,0 +1 @@
return 0

hack/testdata/http-cache/values.yaml vendored Normal file

@@ -0,0 +1,2 @@
endpoints:
- 8.8.8.8:443

hack/testdata/kubernetes/check.sh vendored Normal file

@@ -0,0 +1 @@
return 0

hack/testdata/kubernetes/values.yaml vendored Normal file

@@ -0,0 +1,62 @@
## @section Common parameters
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
replicas: 2
storageClass: replicated
## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
md0:
minReplicas: 0
maxReplicas: 10
instanceType: "u1.medium"
ephemeralStorage: 20Gi
roles:
- ingress-nginx
resources:
cpu: ""
memory: ""
## @section Cluster Addons
##
addons:
## Cert-manager: automatically creates and manages SSL/TLS certificate
##
certManager:
## @param addons.certManager.enabled Enables the cert-manager
## @param addons.certManager.valuesOverride Custom values to override
enabled: true
valuesOverride: {}
## Ingress-NGINX Controller
##
ingressNginx:
## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
## @param addons.ingressNginx.valuesOverride Custom values to override
##
enabled: true
## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
## e.g:
## hosts:
## - example.org
## - foo.example.net
##
hosts: []
valuesOverride: {}
## Flux CD
##
fluxcd:
## @param addons.fluxcd.enabled Enables Flux CD
## @param addons.fluxcd.valuesOverride Custom values to override
##
enabled: true
valuesOverride: {}

hack/testdata/nats/check.sh vendored Normal file

@@ -0,0 +1 @@
return 0

hack/testdata/nats/values.yaml vendored Normal file

@@ -0,0 +1,10 @@
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param replicas Number of NATS replicas
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""

hack/testdata/tenant/check.sh vendored Normal file

@@ -0,0 +1 @@
return 0

hack/testdata/tenant/values.yaml vendored Normal file

@@ -0,0 +1,6 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true


@@ -1,158 +0,0 @@
package controller
import (
"context"
"fmt"
"strings"
"time"
e "errors"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
type TenantHelmReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
hr := &helmv2.HelmRelease{}
if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "unable to fetch HelmRelease")
return ctrl.Result{}, err
}
if !strings.HasPrefix(hr.Name, "tenant-") {
return ctrl.Result{}, nil
}
if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
return ctrl.Result{}, nil
}
if len(hr.Status.History) == 0 {
logger.Info("no history in HelmRelease status", "name", hr.Name)
return ctrl.Result{}, nil
}
if hr.Status.History[0].Status != "deployed" {
return ctrl.Result{}, nil
}
newDigest := hr.Status.History[0].Digest
var hrList helmv2.HelmReleaseList
childNamespace := getChildNamespace(hr.Namespace, hr.Name)
if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
if hr.Spec.Values == nil {
logger.Error(e.New("hr.Spec.Values is nil"), "cant annotate tenant-root ns")
return ctrl.Result{}, nil
}
err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
if err != nil {
logger.Error(err, "cant annotate tenant-root ns")
return ctrl.Result{}, nil
}
logger.Info("namespace 'tenant-root' annotated")
}
if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
logger.Error(err, "unable to list HelmReleases in namespace", "namespace", hr.Name)
return ctrl.Result{}, err
}
for _, item := range hrList.Items {
if item.Name == hr.Name {
continue
}
oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
if oldDigest == newDigest {
continue
}
patchTarget := item.DeepCopy()
if patchTarget.Annotations == nil {
patchTarget.Annotations = map[string]string{}
}
ts := time.Now().Format(time.RFC3339Nano)
patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts
patch := client.MergeFrom(item.DeepCopy())
if err := r.Patch(ctx, patchTarget, patch); err != nil {
logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
continue
}
logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
}
return ctrl.Result{}, nil
}
func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&helmv2.HelmRelease{}).
Complete(r)
}
func getChildNamespace(currentNamespace, hrName string) string {
tenantName := strings.TrimPrefix(hrName, "tenant-")
switch {
case currentNamespace == "tenant-root" && hrName == "tenant-root":
// 1) root tenant inside root namespace
return "tenant-root"
case currentNamespace == "tenant-root":
// 2) any other tenant in root namespace
return fmt.Sprintf("tenant-%s", tenantName)
default:
// 3) tenant in a dedicated namespace
return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
}
}
func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
var data map[string]interface{}
if err := yaml.Unmarshal(values.Raw, &data); err != nil {
return fmt.Errorf("failed to parse HelmRelease values: %w", err)
}
host, ok := data["host"].(string)
if !ok || host == "" {
return fmt.Errorf("host field not found or not a string")
}
var ns corev1.Namespace
if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
return fmt.Errorf("failed to get namespace tenant-root: %w", err)
}
if ns.Annotations == nil {
ns.Annotations = map[string]string{}
}
ns.Annotations["namespace.cozystack.io/host"] = host
if err := c.Update(context.TODO(), &ns); err != nil {
return fmt.Errorf("failed to update namespace: %w", err)
}
return nil
}


@@ -39,15 +39,6 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 	}
 	t := getMonitoredObject(w)
-	if t == nil {
-		err = r.Delete(ctx, w)
-		if err != nil {
-			logger.Error(err, "failed to delete workload")
-		}
-		return ctrl.Result{}, err
-	}
 	err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)
 	// found object, nothing to do
@@ -77,23 +68,20 @@ func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
 }
 func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
-	switch {
-	case strings.HasPrefix(w.Name, "pvc-"):
+	if strings.HasPrefix(w.Name, "pvc-") {
 		obj := &corev1.PersistentVolumeClaim{}
 		obj.Name = strings.TrimPrefix(w.Name, "pvc-")
 		obj.Namespace = w.Namespace
 		return obj
-	case strings.HasPrefix(w.Name, "svc-"):
+	}
+	if strings.HasPrefix(w.Name, "svc-") {
 		obj := &corev1.Service{}
 		obj.Name = strings.TrimPrefix(w.Name, "svc-")
 		obj.Namespace = w.Namespace
 		return obj
-	case strings.HasPrefix(w.Name, "pod-"):
-		obj := &corev1.Pod{}
-		obj.Name = strings.TrimPrefix(w.Name, "pod-")
-		obj.Namespace = w.Namespace
-		return obj
 	}
-	var obj client.Object
+	obj := &corev1.Pod{}
+	obj.Name = w.Name
+	obj.Namespace = w.Namespace
 	return obj
 }


@@ -1,26 +0,0 @@
package controller
import (
"testing"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
)
func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
w := &cozyv1alpha1.Workload{}
w.Name = "unprefixed-name"
obj := getMonitoredObject(w)
if obj != nil {
t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
}
}
func TestPodMonitoredObject(t *testing.T) {
w := &cozyv1alpha1.Workload{}
w.Name = "pod-mypod"
obj := getMonitoredObject(w)
if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
}
}


@@ -212,12 +212,15 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
 ) error {
 	logger := log.FromContext(ctx)
-	// totalResources will store the sum of all container resource requests
+	// Combine both init containers and normal containers to sum resources properly
+	combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
+	// totalResources will store the sum of all container resource limits
 	totalResources := make(map[string]resource.Quantity)
-	// Iterate over all containers to aggregate their requests
-	for _, container := range pod.Spec.Containers {
-		for name, qty := range container.Resources.Requests {
+	// Iterate over all containers to aggregate their Limits
+	for _, container := range combinedContainers {
+		for name, qty := range container.Resources.Limits {
 			if existing, exists := totalResources[name.String()]; exists {
 				existing.Add(qty)
 				totalResources[name.String()] = existing
@@ -246,7 +249,7 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
 	workload := &cozyv1alpha1.Workload{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      fmt.Sprintf("pod-%s", pod.Name),
+			Name:      pod.Name,
 			Namespace: pod.Namespace,
 		},
 	}


@@ -1,8 +1,14 @@
-OUT=../_out/repos/apps
-TMP := $(shell mktemp -d)
+OUT=../../_out/repos/apps
+TMP=../../_out/repos/apps/historical

 repo:
-	cd .. && ../hack/package_chart.sh apps $(OUT) $(TMP) library
+	rm -rf "$(OUT)"
+	mkdir -p "$(OUT)"
+	awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
+	awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
+	helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
+	cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/apps
+	rm -rf "$(TMP)"

 fix-chartnames:
 	find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done


@@ -1,3 +0,0 @@
# S3 bucket
## Parameters


@@ -11,7 +11,7 @@ spec:
         kind: HelmRepository
         name: cozystack-system
         namespace: cozy-system
-      version: '>= 0.0.0-0'
+      version: '*'
   interval: 1m0s
   timeout: 5m0s
   values:


@@ -1,5 +0,0 @@
{
"title": "Chart Values",
"type": "object",
"properties": {}
}


@@ -1 +0,0 @@
{}


@@ -16,15 +16,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.0
+version: 0.7.0
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
 appVersion: "24.9.2"
-dependencies:
-  - name: cozy-lib
-    version: 0.1.0
-    repository: "http://cozystack.cozy-system.svc/repos/library"


@@ -7,10 +7,8 @@ generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md

 image:
-	docker buildx build images/clickhouse-backup \
+	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/clickhouse-backup \
 		--provenance false \
-		--builder=$(BUILDER) \
-		--platform=$(PLATFORM) \
 		--tag $(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/clickhouse-backup:latest \
 		--cache-to type=inline \


@@ -1 +0,0 @@
../../../library/cozy-lib


@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/clickhouse-backup:0.9.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
   )
   "2xlarge" (dict
-    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}


@@ -122,9 +122,9 @@ spec:
         - name: clickhouse
           image: clickhouse/clickhouse-server:24.9.2.42
           {{- if .Values.resources }}
-          resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 16 }}
+          resources: {{- toYaml .Values.resources | nindent 16 }}
           {{- else if ne .Values.resourcesPreset "none" }}
-          resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 16 }}
+          resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
           {{- end }}
           volumeMounts:
             - name: data-volume-template


@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.5.0
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to


@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.12.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
   )
   "2xlarge" (dict
-    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}


@@ -11,9 +11,6 @@ spec:
   {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
   {{- if $rawConstraints }}
   {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
-    labelSelector:
-      matchLabels:
-        cnpg.io/cluster: {{ .Release.Name }}-postgres
   {{- end }}
   {{- end }}
   minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}


@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.0
+version: 0.4.0
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to


@@ -6,10 +6,8 @@ include ../../../scripts/package.mk

 image: image-nginx

 image-nginx:
-	docker buildx build images/nginx-cache \
+	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/nginx-cache \
 		--provenance false \
-		--builder=$(BUILDER) \
-		--platform=$(PLATFORM) \
 		--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:latest \
 		--cache-to type=inline \


@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.5.0@sha256:99cd04f09f80eb0c60cc0b2f6bc8180ada7ada00cb594606447674953dfa1b67
+ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b


@@ -1,4 +1,4 @@
-FROM ubuntu:22.04 AS stage
+FROM ubuntu:22.04 as stage

 ARG NGINX_VERSION=1.25.3
 ARG IP2LOCATION_C_VERSION=8.6.1
@@ -9,15 +9,11 @@ ARG FIFTYONEDEGREES_NGINX_VERSION=3.2.21.1
 ARG NGINX_CACHE_PURGE_VERSION=2.5.3
 ARG NGINX_VTS_VERSION=0.2.2
-ARG TARGETOS
-ARG TARGETARCH

 # Install required packages for development
-RUN apt update -q \
-    && apt install -yq --no-install-recommends \
-    ca-certificates \
+RUN apt-get update -q \
+    && apt-get install -yq \
     unzip \
-    automake \
+    autoconf \
     build-essential \
     libtool \
     libpcre3 \
@@ -72,7 +68,7 @@ RUN checkinstall \
     --default \
     --pkgname=ip2location-c \
     --pkgversion=${IP2LOCATION_C_VERSION} \
-    --pkgarch=${TARGETARCH} \
+    --pkgarch=amd64 \
     --pkggroup=lib \
     --pkgsource="https://github.com/chrislim2888/IP2Location-C-Library" \
     --maintainer="Eduard Generalov <eduard@generalov.net>" \
@@ -101,7 +97,7 @@ RUN checkinstall \
     --default \
     --pkgname=ip2proxy-c \
     --pkgversion=${IP2PROXY_C_VERSION} \
-    --pkgarch=${TARGETARCH} \
+    --pkgarch=amd64 \
     --pkggroup=lib \
     --pkgsource="https://github.com/ip2location/ip2proxy-c" \
     --maintainer="Eduard Generalov <eduard@generalov.net>" \
@@ -148,7 +144,7 @@ RUN checkinstall \
     --default \
     --pkgname=nginx \
     --pkgversion=$VERS \
-    --pkgarch=${TARGETARCH} \
+    --pkgarch=amd64 \
     --pkggroup=web \
     --provides=nginx \
     --requires=ip2location-c,ip2proxy-c,libssl3,libc-bin,libc6,libzstd1,libpcre++0v5,libpcre16-3,libpcre2-8-0,libpcre3,libpcre32-3,libpcrecpp0v5,libmaxminddb0 \
@@ -169,9 +165,10 @@ COPY nginx-reloader.sh /usr/bin/nginx-reloader.sh
 RUN set -x \
     && groupadd --system --gid 101 nginx \
     && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
-    && apt update -q \
-    && apt install -yq --no-install-recommends --no-install-suggests gnupg1 ca-certificates inotify-tools \
-    && apt install -y /packages/*.deb \
+    && apt update \
+    && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates inotify-tools \
+    && apt -y install /packages/*.deb \
+    && apt-get clean \
     && rm -rf /var/lib/apt/lists/* \
     && mkdir -p /var/lib/nginx /var/log/nginx \
     && ln -sf /dev/stdout /var/log/nginx/access.log \


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
   )
   "2xlarge" (dict
-    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}


@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.5.0
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
   )
   "2xlarge" (dict
-    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
-    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}


@@ -16,10 +16,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.21.0
+version: 0.19.0
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: 1.32.4
+appVersion: "1.30.1"


@@ -14,10 +14,8 @@ generate:
image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler
image-ubuntu-container-disk: image-ubuntu-container-disk:
docker buildx build images/ubuntu-container-disk \ docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
--provenance false \ --provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \ --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \ --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \ --tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \
@@ -32,10 +30,8 @@ image-ubuntu-container-disk:
rm -f images/ubuntu-container-disk.json rm -f images/ubuntu-container-disk.json
image-kubevirt-cloud-provider: image-kubevirt-cloud-provider:
docker buildx build images/kubevirt-cloud-provider \ docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/kubevirt-cloud-provider \
--provenance false \ --provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)) \ --tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)) \
--tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \ --tag $(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/kubevirt-cloud-provider:latest \ --cache-from type=registry,ref=$(REGISTRY)/kubevirt-cloud-provider:latest \
@@ -49,10 +45,8 @@ image-kubevirt-cloud-provider:
rm -f images/kubevirt-cloud-provider.json rm -f images/kubevirt-cloud-provider.json
image-kubevirt-csi-driver: image-kubevirt-csi-driver:
docker buildx build images/kubevirt-csi-driver \ docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/kubevirt-csi-driver \
--provenance false \ --provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)) \ --tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)) \
--tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \ --tag $(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/kubevirt-csi-driver:latest \ --cache-from type=registry,ref=$(REGISTRY)/kubevirt-csi-driver:latest \
@@ -67,10 +61,8 @@ image-kubevirt-csi-driver:
image-cluster-autoscaler: image-cluster-autoscaler:
docker buildx build images/cluster-autoscaler \ docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/cluster-autoscaler \
--provenance false \ --provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)) \ --tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)) \
--tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \ --tag $(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/cluster-autoscaler:latest \ --cache-from type=registry,ref=$(REGISTRY)/cluster-autoscaler:latest \
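With this change, the image targets are driven by the `BUILDER` and `PLATFORM` Make variables instead of a hard-coded `--platform linux/amd64`. A hypothetical invocation (the builder name and platform list are illustrative, not taken from this diff):

```bash
docker buildx create --name multiarch --use       # one-time builder setup
make image-cluster-autoscaler BUILDER=multiarch PLATFORM=linux/amd64,linux/arm64
```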

View File

@@ -1,199 +1,75 @@
# Managed Kubernetes Service # Managed Kubernetes Service
## Managed Kubernetes in Cozystack ## Overview
Whenever you want to deploy a custom containerized application in Cozystack, it's best to deploy it to a managed Kubernetes cluster. The Managed Kubernetes Service offers a streamlined solution for efficiently managing server workloads. Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration. This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management.
Cozystack deploys and manages Kubernetes-as-a-service as standalone applications within each tenant's isolated environment. Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method. Additionally, it ensures seamless scaling across a multitude of servers, addressing the challenges posed by complex and outdated APIs found in traditional virtualization platforms. This managed service eliminates the need for developing custom solutions or modifying source code, saving valuable time and effort.
In Cozystack, such clusters are named tenant Kubernetes clusters, while the base Cozystack cluster is called a management or root cluster.
Tenant clusters are fully separated from the management cluster and are intended for deploying tenant-specific or customer-developed applications.
Within a tenant cluster, users can take advantage of LoadBalancer services and easily provision persistent volumes as needed. ## Deployment Details
The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.
## Why Use a Managed Kubernetes Cluster? The managed Kubernetes service deploys a standard Kubernetes cluster utilizing the Cluster API, Kamaji as the control-plane provider, and the KubeVirt infrastructure provider. This ensures a consistent and reliable setup for workloads.
Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration. Within this cluster, users can take advantage of LoadBalancer services and easily provision persistent volumes as needed. The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.
This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management.
Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method. - Docs: https://github.com/clastix/kamaji
Additionally, it ensures seamless scaling across a multitude of servers, - Docs: https://cluster-api.sigs.k8s.io/
addressing the challenges posed by complex and outdated APIs found in traditional virtualization platforms. - GitHub: https://github.com/clastix/kamaji
This managed service eliminates the need for developing custom solutions or modifying source code, saving valuable time and effort. - GitHub: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt
- GitHub: https://github.com/kubevirt/csi-driver
The Managed Kubernetes Service in Cozystack offers a streamlined solution for efficiently managing server workloads.
## Starting Work ## How-Tos
Once the tenant Kubernetes cluster is ready, you can get a kubeconfig file to work with it. How to access the deployed cluster:
It can be done via UI or a `kubectl` request:
- Open the Cozystack dashboard, switch to your tenant, find and open the application page. Copy one of the config files from the **Secrets** section. ```
- Run the following command (using the management cluster kubeconfig): kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```
```bash
kubectl get secret -n tenant-<name> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "admin.conf" | base64decode) }}' > admin.conf
```
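Once saved, the file works like any other kubeconfig. For example:

```bash
# Check that the tenant cluster is reachable with the downloaded kubeconfig
kubectl --kubeconfig admin.conf get nodes
```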
There are several kubeconfig options available:
- `admin.conf` — The standard kubeconfig for accessing your new cluster.
You can create additional Kubernetes users using this configuration.
- `admin.svc` — Same token as `admin.conf`, but with the API server address set to the internal service name.
Use it for applications running inside the cluster that need API access.
- `super-admin.conf` — Similar to `admin.conf`, but with extended administrative permissions.
Intended for troubleshooting and cluster maintenance tasks.
- `super-admin.svc` — Same as `super-admin.conf`, but pointing to the internal API server address.
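All of these variants are stored as keys in the same Secret, so any of them can be extracted with the same command by changing the key. For example, a sketch for fetching `super-admin.conf` for maintenance work:

```bash
kubectl get secret -n tenant-<name> kubernetes-<clusterName>-admin-kubeconfig \
  -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > super-admin.conf
```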
## Implementation Details
A tenant Kubernetes cluster in Cozystack is essentially Kubernetes-in-Kubernetes.
Deploying it involves the following components:
- **Kamaji Control Plane**: [Kamaji](https://kamaji.clastix.io/) is an open-source project that facilitates the deployment
of Kubernetes control planes as pods within a root cluster.
Each control plane pod includes essential components like `kube-apiserver`, `controller-manager`, and `scheduler`,
allowing for efficient multi-tenancy and resource utilization.
- **Etcd Cluster**: A dedicated etcd cluster is deployed using Ænix's [etcd-operator](https://github.com/aenix-io/etcd-operator).
It provides reliable and scalable key-value storage for the Kubernetes control plane.
- **Worker Nodes**: Virtual Machines are provisioned to serve as worker nodes using KubeVirt.
These nodes are configured to join the tenant Kubernetes cluster, enabling the deployment and management of workloads.
- **Cluster API**: Cozystack is using the [Kubernetes Cluster API](https://cluster-api.sigs.k8s.io/) to provision the components of a cluster.
This architecture ensures isolated, scalable, and efficient tenant Kubernetes environments.
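Because the cluster is built from Cluster API resources, its state can also be inspected from the management cluster. A minimal sketch, assuming a tenant namespace named `tenant-example` (the resource kinds follow Cluster API conventions):

```bash
# Run with the management-cluster kubeconfig; the namespace name is illustrative.
kubectl get clusters.cluster.x-k8s.io,machinedeployments.cluster.x-k8s.io -n tenant-example
```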
See the reference for components utilized in this service:
- [Kamaji Control Plane](https://kamaji.clastix.io)
- [Kamaji — Cluster API](https://kamaji.clastix.io/cluster-api/)
- [github.com/clastix/kamaji](https://github.com/clastix/kamaji)
- [KubeVirt](https://kubevirt.io/)
- [github.com/kubevirt/kubevirt](https://github.com/kubevirt/kubevirt)
- [github.com/aenix-io/etcd-operator](https://github.com/aenix-io/etcd-operator)
- [Kubernetes Cluster API](https://cluster-api.sigs.k8s.io/)
- [github.com/kubernetes-sigs/cluster-api-provider-kubevirt](https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt)
- [github.com/kubevirt/csi-driver](https://github.com/kubevirt/csi-driver)
## Parameters ## Parameters
### Common Parameters ### Common parameters
| Name | Description | Value | | Name | Description | Value |
| ----------------------- | ----------------------------------------------------------------------------------------------------------------- | ------------ | | ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
| `host` | Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty. | `""` | | `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components. | `2` | | `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
| `storageClass` | StorageClass used to store user data. | `replicated` | | `storageClass` | StorageClass used to store user data | `replicated` |
| `nodeGroups` | nodeGroups configuration | `{}` | | `nodeGroups` | nodeGroups configuration | `{}` |
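For example, a minimal values sketch using these parameters (the hostname is illustrative; the other values are the chart defaults):

```yaml
host: "kubernetes.example.org"   # illustrative; empty means <cluster-name>.<tenant-host>
controlPlane:
  replicas: 2
storageClass: replicated
nodeGroups: {}
```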
### Cluster Addons ### Cluster Addons
| Name | Description | Value | | Name | Description | Value |
| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | | --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` | | `addons.certManager.enabled` | Enables the cert-manager | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` | | `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` | | `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` | | `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` | | `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` | | `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. | `[]` | | `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` | | `addons.fluxcd.enabled` | Enables Flux CD | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` | | `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enable FluxCD | `false` | | `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` | | `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` | | `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
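As an illustration, enabling a few of these addons in a values file might look like this (the `hosts` entry mirrors the example given in the chart's values; `valuesOverride` contents depend entirely on the addon chart being overridden):

```yaml
addons:
  certManager:
    enabled: true
  ingressNginx:
    enabled: true
    hosts:
      - example.org
  monitoringAgents:
    enabled: true
```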
### Kubernetes Control Plane Configuration ### Kubernetes control plane configuration
| Name | Description | Value | | Name | Description | Value |
| -------------------------------------------------- | ---------------------------------------------------------------------------- | ------- | | -------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `controlPlane.apiServer.resources` | Explicit CPU/memory resource requests and limits for the API server. | `{}` | | `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `controlPlane.apiServer.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `small` | | `controlPlane.apiServer.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resources` | Explicit CPU/memory resource requests and limits for the controller manager. | `{}` | | `controlPlane.controllerManager.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` | | `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resources` | Explicit CPU/memory resource requests and limits for the scheduler. | `{}` | | `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` | | `controlPlane.scheduler.resources` | Resources | `{}` |
| `controlPlane.konnectivity.server.resources` | Explicit CPU/memory resource requests and limits for the Konnectivity server. | `{}` | | `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` | | `controlPlane.konnectivity.server.resources` | Resources | `{}` |
In production environments, it's recommended to set `resources` explicitly.
Example of `controlPlane.*.resources`:
```yaml ## U Series
resources:
limits:
cpu: 4000m
memory: 4Gi
requests:
cpu: 100m
memory: 512Mi
```
Allowed values for `controlPlane.*.resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
This value is ignored if the corresponding `resources` value is set.
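Both styles can be mixed per component; a preset is a shortcut for testing, while explicit `resources` win whenever they are set. A short sketch:

```yaml
controlPlane:
  apiServer:
    resourcesPreset: medium      # used because resources is empty
  scheduler:
    resourcesPreset: micro       # ignored: resources below takes precedence
    resources:
      requests:
        cpu: 100m
        memory: 512Mi
```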
## Resources Reference
### instanceType Resources
The following instanceType resources are provided by Cozystack:
| Name | vCPUs | Memory |
|---------------|-------|--------|
| `cx1.2xlarge` | 8 | 16Gi |
| `cx1.4xlarge` | 16 | 32Gi |
| `cx1.8xlarge` | 32 | 64Gi |
| `cx1.large` | 2 | 4Gi |
| `cx1.medium` | 1 | 2Gi |
| `cx1.xlarge` | 4 | 8Gi |
| `gn1.2xlarge` | 8 | 32Gi |
| `gn1.4xlarge` | 16 | 64Gi |
| `gn1.8xlarge` | 32 | 128Gi |
| `gn1.xlarge` | 4 | 16Gi |
| `m1.2xlarge` | 8 | 64Gi |
| `m1.4xlarge` | 16 | 128Gi |
| `m1.8xlarge` | 32 | 256Gi |
| `m1.large` | 2 | 16Gi |
| `m1.xlarge` | 4 | 32Gi |
| `n1.2xlarge` | 16 | 32Gi |
| `n1.4xlarge` | 32 | 64Gi |
| `n1.8xlarge` | 64 | 128Gi |
| `n1.large` | 4 | 8Gi |
| `n1.medium` | 4 | 4Gi |
| `n1.xlarge` | 8 | 16Gi |
| `o1.2xlarge` | 8 | 32Gi |
| `o1.4xlarge` | 16 | 64Gi |
| `o1.8xlarge` | 32 | 128Gi |
| `o1.large` | 2 | 8Gi |
| `o1.medium` | 1 | 4Gi |
| `o1.micro` | 1 | 1Gi |
| `o1.nano` | 1 | 512Mi |
| `o1.small` | 1 | 2Gi |
| `o1.xlarge` | 4 | 16Gi |
| `rt1.2xlarge` | 8 | 32Gi |
| `rt1.4xlarge` | 16 | 64Gi |
| `rt1.8xlarge` | 32 | 128Gi |
| `rt1.large` | 2 | 8Gi |
| `rt1.medium` | 1 | 4Gi |
| `rt1.micro` | 1 | 1Gi |
| `rt1.small` | 1 | 2Gi |
| `rt1.xlarge` | 4 | 16Gi |
| `u1.2xlarge` | 8 | 32Gi |
| `u1.2xmedium` | 2 | 4Gi |
| `u1.4xlarge` | 16 | 64Gi |
| `u1.8xlarge` | 32 | 128Gi |
| `u1.large` | 2 | 8Gi |
| `u1.medium` | 1 | 4Gi |
| `u1.micro` | 1 | 1Gi |
| `u1.nano` | 1 | 512Mi |
| `u1.small` | 1 | 2Gi |
| `u1.xlarge` | 4 | 16Gi |
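These sizes can be referenced from `nodeGroups` when defining worker pools. The sketch below is hypothetical: the field names are assumptions for illustration only and are not confirmed by this chart's schema.

```yaml
nodeGroups:
  md0:                      # group name is arbitrary
    minReplicas: 1          # hypothetical field
    maxReplicas: 3          # hypothetical field
    instanceType: u1.large  # 2 vCPUs / 8Gi, per the table above
```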
### U Series: Universal
The U Series is quite neutral and provides resources for The U Series is quite neutral and provides resources for
general purpose applications. general purpose applications.
@@ -204,7 +80,7 @@ attitude towards workloads.
VMs of instance types will share physical CPU cores on a VMs of instance types will share physical CPU cores on a
time-slice basis with other VMs. time-slice basis with other VMs.
#### U Series Characteristics ### U Series Characteristics
Specific characteristics of this series are: Specific characteristics of this series are:
- *Burstable CPU performance* - The workload has a baseline compute - *Burstable CPU performance* - The workload has a baseline compute
@@ -213,14 +89,14 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less - *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
noise per node. noise per node.
### O Series: Overcommitted ## O Series
The O Series is based on the U Series, with the only difference The O Series is based on the U Series, with the only difference
being that memory is overcommitted. being that memory is overcommitted.
*O* is the abbreviation for "Overcommitted". *O* is the abbreviation for "Overcommitted".
#### O Series Characteristics ### UO Series Characteristics
Specific characteristics of this series are: Specific characteristics of this series are:
- *Burstable CPU performance* - The workload has a baseline compute - *Burstable CPU performance* - The workload has a baseline compute
@@ -231,7 +107,7 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less - *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
noise per node. noise per node.
### CX Series: Compute Exclusive ## CX Series
The CX Series provides exclusive compute resources for compute The CX Series provides exclusive compute resources for compute
intensive applications. intensive applications.
@@ -245,7 +121,7 @@ the IO threading from cores dedicated to the workload.
In addition, in this series, the NUMA topology of the used In addition, in this series, the NUMA topology of the used
cores is provided to the VM. cores is provided to the VM.
#### CX Series Characteristics ### CX Series Characteristics
Specific characteristics of this series are: Specific characteristics of this series are:
- *Hugepages* - Hugepages are used in order to improve memory - *Hugepages* - Hugepages are used in order to improve memory
@@ -260,14 +136,14 @@ Specific characteristics of this series are:
optimize guest sided cache utilization. optimize guest sided cache utilization.
- *vCPU-To-Memory Ratio (1:2)* - A vCPU-to-Memory ratio of 1:2. - *vCPU-To-Memory Ratio (1:2)* - A vCPU-to-Memory ratio of 1:2.
### M Series: Memory ## M Series
The M Series provides resources for memory intensive The M Series provides resources for memory intensive
applications. applications.
*M* is the abbreviation of "Memory". *M* is the abbreviation of "Memory".
#### M Series Characteristics ### M Series Characteristics
Specific characteristics of this series are: Specific characteristics of this series are:
- *Hugepages* - Hugepages are used in order to improve memory - *Hugepages* - Hugepages are used in order to improve memory
@@ -278,7 +154,7 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:8)* - A vCPU-to-Memory ratio of 1:8, for much - *vCPU-To-Memory Ratio (1:8)* - A vCPU-to-Memory ratio of 1:8, for much
less noise per node. less noise per node.
### RT Series: RealTime ## RT Series
The RT Series provides resources for realtime applications, like Oslat. The RT Series provides resources for realtime applications, like Oslat.
@@ -287,7 +163,7 @@ The RT Series provides resources for realtime applications, like Oslat.
This series of instance types requires nodes capable of running This series of instance types requires nodes capable of running
realtime applications. realtime applications.
#### RT Series Characteristics ### RT Series Characteristics
Specific characteristics of this series are: Specific characteristics of this series are:
- *Hugepages* - Hugepages are used in order to improve memory - *Hugepages* - Hugepages are used in order to improve memory
@@ -301,3 +177,57 @@ Specific characteristics of this series are:
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from - *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from
the medium size. the medium size.
## Resources
The following instancetype resources are provided by Cozystack:
Name | vCPUs | Memory
-----|-------|-------
cx1.2xlarge | 8 | 16Gi
cx1.4xlarge | 16 | 32Gi
cx1.8xlarge | 32 | 64Gi
cx1.large | 2 | 4Gi
cx1.medium | 1 | 2Gi
cx1.xlarge | 4 | 8Gi
gn1.2xlarge | 8 | 32Gi
gn1.4xlarge | 16 | 64Gi
gn1.8xlarge | 32 | 128Gi
gn1.xlarge | 4 | 16Gi
m1.2xlarge | 8 | 64Gi
m1.4xlarge | 16 | 128Gi
m1.8xlarge | 32 | 256Gi
m1.large | 2 | 16Gi
m1.xlarge | 4 | 32Gi
n1.2xlarge | 16 | 32Gi
n1.4xlarge | 32 | 64Gi
n1.8xlarge | 64 | 128Gi
n1.large | 4 | 8Gi
n1.medium | 4 | 4Gi
n1.xlarge | 8 | 16Gi
o1.2xlarge | 8 | 32Gi
o1.4xlarge | 16 | 64Gi
o1.8xlarge | 32 | 128Gi
o1.large | 2 | 8Gi
o1.medium | 1 | 4Gi
o1.micro | 1 | 1Gi
o1.nano | 1 | 512Mi
o1.small | 1 | 2Gi
o1.xlarge | 4 | 16Gi
rt1.2xlarge | 8 | 32Gi
rt1.4xlarge | 16 | 64Gi
rt1.8xlarge | 32 | 128Gi
rt1.large | 2 | 8Gi
rt1.medium | 1 | 4Gi
rt1.micro | 1 | 1Gi
rt1.small | 1 | 2Gi
rt1.xlarge | 4 | 16Gi
u1.2xlarge | 8 | 32Gi
u1.2xmedium | 2 | 4Gi
u1.4xlarge | 16 | 64Gi
u1.8xlarge | 32 | 128Gi
u1.large | 2 | 8Gi
u1.medium | 1 | 4Gi
u1.micro | 1 | 1Gi
u1.nano | 1 | 512Mi
u1.small | 1 | 2Gi
u1.xlarge | 4 | 16Gi

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.21.0@sha256:7315850634728a5864a3de3150c12f0e1454f3f1ce33cdf21a278f57611dd5e9 ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3

View File

@@ -1,14 +1,7 @@
# Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64 # Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64
ARG builder_image=docker.io/library/golang:1.23.4 ARG builder_image=docker.io/library/golang:1.23.4
ARG BASEIMAGE=gcr.io/distroless/static:nonroot-${TARGETARCH} ARG BASEIMAGE=gcr.io/distroless/static:nonroot-amd64
FROM ${builder_image} AS builder FROM ${builder_image} AS builder
ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH
RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \ RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \
&& cd /src/autoscaler/cluster-autoscaler \ && cd /src/autoscaler/cluster-autoscaler \
&& git checkout cluster-autoscaler-1.32.0 && git checkout cluster-autoscaler-1.32.0
@@ -21,8 +14,6 @@ RUN make build
FROM $BASEIMAGE FROM $BASEIMAGE
LABEL maintainer="Marcin Wielgus <mwielgus@google.com>" LABEL maintainer="Marcin Wielgus <mwielgus@google.com>"
ARG TARGETARCH COPY --from=builder /src/autoscaler/cluster-autoscaler/cluster-autoscaler-amd64 /cluster-autoscaler
COPY --from=builder /src/autoscaler/cluster-autoscaler/cluster-autoscaler-${TARGETARCH} /cluster-autoscaler
WORKDIR / WORKDIR /
CMD ["/cluster-autoscaler"] CMD ["/cluster-autoscaler"]
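Note that `TARGETOS`/`TARGETARCH` are populated automatically by BuildKit for each platform in a multi-platform build, so no explicit `ARCH` build argument is needed. A sketch (the image tag is illustrative):

```bash
docker buildx build --platform linux/amd64,linux/arm64 \
  --tag example.registry/cluster-autoscaler:dev images/cluster-autoscaler
```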

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.21.0@sha256:6962bdf51ab2ff40b420b9cff7c850aeea02187da2a65a67f10e0471744649d7 ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9

View File

@@ -1,10 +1,5 @@
# Source: https://github.com/kubevirt/cloud-provider-kubevirt/blob/main/build/images/kubevirt-cloud-controller-manager/Dockerfile # Source: https://github.com/kubevirt/cloud-provider-kubevirt/blob/main/build/images/kubevirt-cloud-controller-manager/Dockerfile
FROM golang:1.20.6 AS builder FROM --platform=linux/amd64 golang:1.20.6 AS builder
ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH
RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \ RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
&& cd /go/src/kubevirt.io/cloud-provider-kubevirt \ && cd /go/src/kubevirt.io/cloud-provider-kubevirt \
@@ -19,7 +14,7 @@ RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'
RUN go mod tidy RUN go mod tidy
RUN go mod vendor RUN go mod vendor
RUN CGO_ENABLED=0 go build -mod=vendor -ldflags="-s -w" -o bin/kubevirt-cloud-controller-manager ./cmd/kubevirt-cloud-controller-manager RUN CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags="-s -w" -o bin/kubevirt-cloud-controller-manager ./cmd/kubevirt-cloud-controller-manager
FROM registry.access.redhat.com/ubi9/ubi-micro FROM registry.access.redhat.com/ubi9/ubi-micro
COPY --from=builder /go/src/kubevirt.io/cloud-provider-kubevirt/bin/kubevirt-cloud-controller-manager /bin/kubevirt-cloud-controller-manager COPY --from=builder /go/src/kubevirt.io/cloud-provider-kubevirt/bin/kubevirt-cloud-controller-manager /bin/kubevirt-cloud-controller-manager

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.21.0@sha256:b1525163cd21938ac934bb1b860f2f3151464fa463b82880ab058167aeaf3e29 ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20

View File

@@ -5,11 +5,6 @@ RUN git clone https://github.com/kubevirt/csi-driver /src/kubevirt-csi-driver \
&& cd /src/kubevirt-csi-driver \ && cd /src/kubevirt-csi-driver \
&& git checkout 35836e0c8b68d9916d29a838ea60cdd3fc6199cf && git checkout 35836e0c8b68d9916d29a838ea60cdd3fc6199cf
ARG TARGETOS
ARG TARGETARCH
ENV GOOS=$TARGETOS
ENV GOARCH=$TARGETARCH
WORKDIR /src/kubevirt-csi-driver WORKDIR /src/kubevirt-csi-driver
RUN make build RUN make build

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:bfe568db4b768a4b6c67a8d562892bbba766d0245e140d431754589b347f0b41 ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a

View File

@@ -1,5 +1,5 @@
# TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04 # TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04
FROM ubuntu:22.04 AS guestfish FROM ubuntu:22.04 as guestfish
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \ RUN apt-get update \
@@ -8,17 +8,15 @@ RUN apt-get update \
linux-image-generic \ linux-image-generic \
wget \ wget \
make \ make \
bash-completion bash-completion \
&& apt-get clean
WORKDIR /build WORKDIR /build
FROM guestfish AS builder FROM guestfish as builder
ARG TARGETOS
ARG TARGETARCH
# noble is a code name for the Ubuntu 24.04 LTS release # noble is a code name for the Ubuntu 24.04 LTS release
RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-${TARGETARCH}.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
ARG KUBERNETES_VERSION ARG KUBERNETES_VERSION
@@ -31,21 +29,19 @@ RUN qemu-img resize image.img 5G \
&& guestfish --remote command "resize2fs /dev/sda1" \ && guestfish --remote command "resize2fs /dev/sda1" \
# docker repo # docker repo
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \ && guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
&& guestfish --remote sh 'echo "deb [signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \ && guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
# kubernetes repo # kubernetes repo
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \ && guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \ && guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
&& guestfish --remote command "apt-get check -q" \
# install containerd # install containerd
&& guestfish --remote command "apt-get update -q" \ && guestfish --remote command "apt-get update -y" \
&& guestfish --remote command "apt-get install -yq containerd.io" \ && guestfish --remote command "apt-get install -y containerd.io" \
# configure containerd # configure containerd
&& guestfish --remote command "mkdir -p /etc/containerd" \ && guestfish --remote command "mkdir -p /etc/containerd" \
&& guestfish --remote sh "containerd config default | tee /etc/containerd/config.toml" \ && guestfish --remote sh "containerd config default | tee /etc/containerd/config.toml" \
&& guestfish --remote command "sed -i '/SystemdCgroup/ s/=.*/= true/' /etc/containerd/config.toml" \ && guestfish --remote command "sed -i '/SystemdCgroup/ s/=.*/= true/' /etc/containerd/config.toml" \
&& guestfish --remote command "containerd config dump >/dev/null" \
# install kubernetes # install kubernetes
&& guestfish --remote command "apt-get install -yq kubelet kubeadm" \ && guestfish --remote command "apt-get install -y kubelet kubeadm" \
# clean apt cache # clean apt cache
&& guestfish --remote sh 'apt-get clean && rm -rf /var/lib/apt/lists/*' \ && guestfish --remote sh 'apt-get clean && rm -rf /var/lib/apt/lists/*' \
# write system configuration # write system configuration

View File

@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}} {{ include "resources.preset" (dict "type" "nano") -}}
*/}} */}}
{{- define "resources.preset" -}} {{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict {{- $presets := dict
"nano" (dict "nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
) )
"micro" (dict "micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
) )
"small" (dict "small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
) )
"medium" (dict "medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
) )
"large" (dict "large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
) )
"xlarge" (dict "xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
) )
"2xlarge" (dict "2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
) )
}} }}
{{- if hasKey $presets .type -}} {{- if hasKey $presets .type -}}
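With the new table, `{{ include "resources.preset" (dict "type" "small") }}` renders roughly the following (a sketch derived from the dict above; CPU limits are intentionally dropped, so limits now simply pin memory to the requested value):

```yaml
requests:
  cpu: 500m
  memory: 512Mi
  ephemeral-storage: 50Mi
limits:
  memory: 512Mi
  ephemeral-storage: 2Gi
```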

View File

@@ -31,16 +31,6 @@ spec:
{{- end }} {{- end }}
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }} cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }}
spec: spec:
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
{{- if $configMap }}
{{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
{{- if $rawConstraints }}
{{- $rawConstraints | fromYaml | toYaml | nindent 10 }}
labelSelector:
matchLabels:
cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
{{- end }}
{{- end }}
domain: domain:
{{- if and .group.resources .group.resources.cpu }} {{- if and .group.resources .group.resources.cpu }}
cpu: cpu:
@@ -160,14 +150,14 @@ spec:
ingress: ingress:
extraAnnotations: extraAnnotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true" nginx.ingress.kubernetes.io/ssl-passthrough: "true"
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443 hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}
className: "{{ $ingress }}" className: "{{ $ingress }}"
deployment: deployment:
podAdditionalMetadata: podAdditionalMetadata:
labels: labels:
policy.cozystack.io/allow-to-etcd: "true" policy.cozystack.io/allow-to-etcd: "true"
replicas: 2 replicas: 2
version: {{ $.Chart.AppVersion }} version: 1.30.1
--- ---
apiVersion: cozystack.io/v1alpha1 apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor kind: WorkloadMonitor
@@ -293,7 +283,7 @@ spec:
kind: KubevirtMachineTemplate kind: KubevirtMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }} name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
namespace: {{ $.Release.Namespace }} namespace: {{ $.Release.Namespace }}
version: v{{ $.Chart.AppVersion }} version: v1.32.3
--- ---
apiVersion: cluster.x-k8s.io/v1beta1 apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck kind: MachineHealthCheck

View File

@@ -16,7 +16,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -17,7 +17,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -1,18 +1,3 @@
{{- define "cozystack.defaultCiliumValues" -}}
cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
{{- if $.Values.addons.gatewayAPI.enabled }}
gatewayAPI:
enabled: true
envoy:
enabled: true
{{- end }}
{{- end }}
apiVersion: helm.toolkit.fluxcd.io/v2 apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease kind: HelmRelease
metadata: metadata:
@@ -31,7 +16,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig
@@ -46,13 +30,14 @@ spec:
remediation: remediation:
retries: -1 retries: -1
values: values:
{{- toYaml (deepCopy .Values.addons.cilium.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultCiliumValues" .))) | nindent 4 }} cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
dependsOn: dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }} {{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }} - name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }} namespace: {{ .Release.Namespace }}
{{- end }} {{- end }}
{{- if $.Values.addons.gatewayAPI.enabled }}
- name: {{ .Release.Name }}-gateway-api-crds
namespace: {{ .Release.Namespace }}
{{- end }}
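Since `mergeOverwrite` layers `addons.cilium.valuesOverride` on top of `cozystack.defaultCiliumValues`, user-supplied keys win while untouched defaults are kept. For example, a sketch of an override (the CIDR value is illustrative):

```yaml
addons:
  cilium:
    valuesOverride:
      cilium:
        routingMode: native              # replaces the default "tunnel"
        ipv4NativeRoutingCIDR: "10.244.0.0/16"
```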

View File

@@ -16,7 +16,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -30,7 +30,6 @@ spec:
patch patch
helmrelease helmrelease
{{ .Release.Name }}-cilium {{ .Release.Name }}-cilium
{{ .Release.Name }}-gateway-api-crds
{{ .Release.Name }}-csi {{ .Release.Name }}-csi
{{ .Release.Name }}-cert-manager {{ .Release.Name }}-cert-manager
{{ .Release.Name }}-cert-manager-crds {{ .Release.Name }}-cert-manager-crds

View File

@@ -17,7 +17,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig
@@ -62,7 +61,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-kubeconfig name: {{ .Release.Name }}-kubeconfig

View File

@@ -1,38 +0,0 @@
{{- if $.Values.addons.gatewayAPI.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-gateway-api-crds
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: gateway-api-crds
chart:
spec:
chart: cozy-gateway-api-crds
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
targetNamespace: kube-system
storageNamespace: kube-system
install:
createNamespace: false
remediation:
retries: -1
upgrade:
remediation:
retries: -1
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}

View File

@@ -17,7 +17,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -6,11 +6,6 @@ ingress-nginx:
hostNetwork: true hostNetwork: true
service: service:
enabled: false enabled: false
{{- if not .Values.addons.certManager.enabled }}
admissionWebhooks:
certManager:
enabled: false
{{- end }}
nodeSelector: nodeSelector:
node-role.kubernetes.io/ingress-nginx: "" node-role.kubernetes.io/ingress-nginx: ""
{{- end }} {{- end }}
@@ -34,7 +29,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -19,7 +19,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -17,7 +17,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -42,7 +42,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -17,7 +17,6 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig: kubeConfig:
secretRef: secretRef:
name: {{ .Release.Name }}-admin-kubeconfig name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -4,7 +4,7 @@
"properties": { "properties": {
"host": { "host": {
"type": "string", "type": "string",
"description": "Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty.", "description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
"default": "" "default": ""
}, },
"controlPlane": { "controlPlane": {
@@ -12,20 +12,15 @@
"properties": { "properties": {
"replicas": { "replicas": {
"type": "number", "type": "number",
"description": "Number of replicas for Kubernetes control-plane components.", "description": "Number of replicas for Kubernetes control-plane components",
"default": 2 "default": 2
}, },
"apiServer": { "apiServer": {
"type": "object", "type": "object",
"properties": { "properties": {
"resources": {
"type": "object",
"description": "Explicit CPU/memory resource requests and limits for the API server.",
"default": {}
},
"resourcesPreset": { "resourcesPreset": {
"type": "string", "type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.", "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "small", "default": "small",
"enum": [ "enum": [
"none", "none",
@@ -37,6 +32,11 @@
"xlarge", "xlarge",
"2xlarge" "2xlarge"
] ]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
} }
} }
}, },
@@ -45,12 +45,12 @@
"properties": { "properties": {
"resources": { "resources": {
"type": "object", "type": "object",
"description": "Explicit CPU/memory resource requests and limits for the controller manager.", "description": "Resources",
"default": {} "default": {}
}, },
"resourcesPreset": { "resourcesPreset": {
"type": "string", "type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.", "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro", "default": "micro",
"enum": [ "enum": [
"none", "none",
@@ -68,14 +68,9 @@
"scheduler": { "scheduler": {
"type": "object", "type": "object",
"properties": { "properties": {
"resources": {
"type": "object",
"description": "Explicit CPU/memory resource requests and limits for the scheduler.",
"default": {}
},
"resourcesPreset": { "resourcesPreset": {
"type": "string", "type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.", "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro", "default": "micro",
"enum": [ "enum": [
"none", "none",
@@ -87,6 +82,11 @@
"xlarge", "xlarge",
"2xlarge" "2xlarge"
] ]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
} }
} }
}, },
@@ -96,14 +96,9 @@
"server": { "server": {
"type": "object", "type": "object",
"properties": { "properties": {
"resources": {
"type": "object",
"description": "Explicit CPU/memory resource requests and limits for the Konnectivity.",
"default": {}
},
"resourcesPreset": { "resourcesPreset": {
"type": "string", "type": "string",
"description": "Use a common resources preset when `resources` is not set explicitly.", "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro", "default": "micro",
"enum": [ "enum": [
"none", "none",
@@ -115,6 +110,11 @@
"xlarge", "xlarge",
"2xlarge" "2xlarge"
] ]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
} }
} }
} }
@@ -124,7 +124,7 @@
}, },
"storageClass": { "storageClass": {
"type": "string", "type": "string",
"description": "StorageClass used to store user data.", "description": "StorageClass used to store user data",
"default": "replicated" "default": "replicated"
}, },
"addons": { "addons": {
@@ -135,7 +135,7 @@
"properties": { "properties": {
"enabled": { "enabled": {
"type": "boolean", "type": "boolean",
"description": "Enable cert-manager, which automatically creates and manages SSL/TLS certificates.", "description": "Enables the cert-manager",
"default": false "default": false
}, },
"valuesOverride": { "valuesOverride": {
@@ -145,32 +145,12 @@
} }
} }
}, },
"cilium": {
"type": "object",
"properties": {
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"gatewayAPI": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable the Gateway API",
"default": false
}
}
},
"ingressNginx": { "ingressNginx": {
"type": "object", "type": "object",
"properties": { "properties": {
"enabled": { "enabled": {
"type": "boolean", "type": "boolean",
"description": "Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role).", "description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
"default": false "default": false
}, },
"valuesOverride": { "valuesOverride": {
@@ -180,7 +160,7 @@
}, },
"hosts": { "hosts": {
"type": "array", "type": "array",
"description": "List of domain names that the parent cluster should route to this tenant cluster.", "description": "List of domain names that should be passed through to the cluster by upper cluster",
"default": [], "default": [],
"items": {} "items": {}
} }
@@ -191,7 +171,7 @@
"properties": { "properties": {
"enabled": { "enabled": {
"type": "boolean", "type": "boolean",
"description": "Enable the GPU-operator", "description": "Enables the gpu-operator",
"default": false "default": false
}, },
"valuesOverride": { "valuesOverride": {
@@ -206,7 +186,7 @@
"properties": { "properties": {
"enabled": { "enabled": {
"type": "boolean", "type": "boolean",
"description": "Enable FluxCD", "description": "Enables Flux CD",
"default": false "default": false
}, },
"valuesOverride": { "valuesOverride": {
@@ -221,7 +201,7 @@
"properties": { "properties": {
"enabled": { "enabled": {
"type": "boolean", "type": "boolean",
"description": "Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage.", "description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
"default": false "default": false
}, },
"valuesOverride": { "valuesOverride": {

View File

@@ -1,8 +1,8 @@
## @section Common Parameters ## @section Common parameters
## @param host Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty. ## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components. ## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param storageClass StorageClass used to store user data. ## @param storageClass StorageClass used to store user data
## ##
host: "" host: ""
storageClass: replicated storageClass: replicated
@@ -37,31 +37,19 @@ addons:
## Cert-manager: automatically creates and manages SSL/TLS certificate ## Cert-manager: automatically creates and manages SSL/TLS certificate
## ##
certManager: certManager:
## @param addons.certManager.enabled Enable cert-manager, which automatically creates and manages SSL/TLS certificates. ## @param addons.certManager.enabled Enables the cert-manager
## @param addons.certManager.valuesOverride Custom values to override ## @param addons.certManager.valuesOverride Custom values to override
enabled: false enabled: false
valuesOverride: {} valuesOverride: {}
## Cilium CNI plugin
##
cilium:
## @param addons.cilium.valuesOverride Custom values to override
valuesOverride: {}
## Gateway API
##
gatewayAPI:
## @param addons.gatewayAPI.enabled Enable the Gateway API
enabled: false
## Ingress-NGINX Controller ## Ingress-NGINX Controller
## ##
ingressNginx: ingressNginx:
## @param addons.ingressNginx.enabled Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). ## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
## @param addons.ingressNginx.valuesOverride Custom values to override ## @param addons.ingressNginx.valuesOverride Custom values to override
## ##
enabled: false enabled: false
## @param addons.ingressNginx.hosts List of domain names that the parent cluster should route to this tenant cluster. ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
## e.g: ## e.g:
## hosts: ## hosts:
## - example.org ## - example.org
@@ -73,7 +61,7 @@ addons:
## GPU-operator: NVIDIA GPU Operator ## GPU-operator: NVIDIA GPU Operator
## ##
gpuOperator: gpuOperator:
## @param addons.gpuOperator.enabled Enable the GPU-operator ## @param addons.gpuOperator.enabled Enables the gpu-operator
## @param addons.gpuOperator.valuesOverride Custom values to override ## @param addons.gpuOperator.valuesOverride Custom values to override
enabled: false enabled: false
valuesOverride: {} valuesOverride: {}
@@ -81,7 +69,7 @@ addons:
## Flux CD ## Flux CD
## ##
fluxcd: fluxcd:
## @param addons.fluxcd.enabled Enable FluxCD ## @param addons.fluxcd.enabled Enables Flux CD
## @param addons.fluxcd.valuesOverride Custom values to override ## @param addons.fluxcd.valuesOverride Custom values to override
## ##
enabled: false enabled: false
@@ -90,7 +78,7 @@ addons:
## MonitoringAgents ## MonitoringAgents
## ##
monitoringAgents: monitoringAgents:
## @param addons.monitoringAgents.enabled Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. ## @param addons.monitoringAgents.enabled Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage
## @param addons.monitoringAgents.valuesOverride Custom values to override ## @param addons.monitoringAgents.valuesOverride Custom values to override
## ##
enabled: false enabled: false
@@ -103,15 +91,15 @@ addons:
## ##
valuesOverride: {} valuesOverride: {}
## @section Kubernetes Control Plane Configuration ## @section Kubernetes control plane configuration
## ##
controlPlane: controlPlane:
replicas: 2 replicas: 2
apiServer: apiServer:
## @param controlPlane.apiServer.resources Explicit CPU/memory resource requests and limits for the API server. ## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## @param controlPlane.apiServer.resourcesPreset Use a common resources preset when `resources` is not set explicitly. ## @param controlPlane.apiServer.resources Resources
## e.g: ## e.g:
## resources: ## resources:
## limits: ## limits:
@@ -125,20 +113,20 @@ controlPlane:
resources: {} resources: {}
controllerManager: controllerManager:
## @param controlPlane.controllerManager.resources Explicit CPU/memory resource requests and limits for the controller manager. ## @param controlPlane.controllerManager.resources Resources
## @param controlPlane.controllerManager.resourcesPreset Use a common resources preset when `resources` is not set explicitly. ## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "micro" resourcesPreset: "micro"
resources: {} resources: {}
scheduler: scheduler:
## @param controlPlane.scheduler.resources Explicit CPU/memory resource requests and limits for the scheduler. ## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## @param controlPlane.scheduler.resourcesPreset Use a common resources preset when `resources` is not set explicitly. ## @param controlPlane.scheduler.resources Resources
resourcesPreset: "micro" resourcesPreset: "micro"
resources: {} resources: {}
konnectivity: konnectivity:
server: server:
## @param controlPlane.konnectivity.server.resources Explicit CPU/memory resource requests and limits for the Konnectivity server. ## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## @param controlPlane.konnectivity.server.resourcesPreset Use a common resources preset when `resources` is not set explicitly. ## @param controlPlane.konnectivity.server.resources Resources
resourcesPreset: "micro" resourcesPreset: "micro"
resources: {} resources: {}

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0 version: 0.6.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to


@@ -7,10 +7,8 @@ generate:
readme-generator -v values.yaml -s values.schema.json -r README.md readme-generator -v values.yaml -s values.schema.json -r README.md
image: image:
docker buildx build images/mariadb-backup \ docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/mariadb-backup \
--provenance false \ --provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/mariadb-backup:$(call settag,$(MARIADB_BACKUP_TAG)) \ --tag $(REGISTRY)/mariadb-backup:$(call settag,$(MARIADB_BACKUP_TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/mariadb-backup:latest \ --cache-from type=registry,ref=$(REGISTRY)/mariadb-backup:latest \
--cache-to type=inline \ --cache-to type=inline \


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/mariadb-backup:0.7.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4 ghcr.io/cozystack/cozystack/mariadb-backup:0.6.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}} {{ include "resources.preset" (dict "type" "nano") -}}
*/}} */}}
{{- define "resources.preset" -}} {{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict {{- $presets := dict
"nano" (dict "nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
) )
"micro" (dict "micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
) )
"small" (dict "small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
) )
"medium" (dict "medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
) )
"large" (dict "large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
) )
"xlarge" (dict "xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
) )
"2xlarge" (dict "2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
) )
}} }}
{{- if hasKey $presets .type -}} {{- if hasKey $presets .type -}}
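
As a worked example of the right-hand table (the variant whose comment promises limits at roughly 150% of requests, except ephemeral-storage and the xlarge/2xlarge sizes), including the helper with type `small` should render values equivalent to:

```yaml
requests:
  cpu: 500m
  memory: 512Mi
  ephemeral-storage: 50Mi
limits:
  cpu: 750m
  memory: 768Mi
  ephemeral-storage: 2Gi
```

The left-hand variant instead drops CPU limits entirely and pins the memory limit to the request, trading burst headroom for more predictable behavior.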


@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.0 version: 0.5.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}} {{ include "resources.preset" (dict "type" "nano") -}}
*/}} */}}
{{- define "resources.preset" -}} {{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict {{- $presets := dict
"nano" (dict "nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
) )
"micro" (dict "micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
) )
"small" (dict "small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
) )
"medium" (dict "medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
) )
"large" (dict "large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
) )
"xlarge" (dict "xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
) )
"2xlarge" (dict "2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
) )
}} }}
{{- if hasKey $presets .type -}} {{- if hasKey $presets .type -}}


@@ -33,7 +33,7 @@ spec:
kind: HelmRepository kind: HelmRepository
name: cozystack-system name: cozystack-system
namespace: cozy-system namespace: cozy-system
version: '>= 0.0.0-0' version: '*'
interval: 1m0s interval: 1m0s
timeout: 5m0s timeout: 5m0s
values: values:
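
The only change in this hunk is the semver range. Under the semver matching used by Flux, `*` resolves to the newest stable release and skips prereleases, whereas `>= 0.0.0-0` admits prerelease builds as well. A sketch of the field in context (the chart name is a placeholder; the `sourceRef` fields are the ones shown above):

```yaml
spec:
  chart:
    spec:
      chart: some-app          # placeholder name
      version: '>= 0.0.0-0'    # also matches prereleases, unlike '*'
      sourceRef:
        kind: HelmRepository
        name: cozystack-system
        namespace: cozy-system
```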


@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.12.0 version: 0.10.1
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to


@@ -7,10 +7,8 @@ generate:
readme-generator -v values.yaml -s values.schema.json -r README.md readme-generator -v values.yaml -s values.schema.json -r README.md
image: image:
docker buildx build images/postgres-backup \ docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/postgres-backup \
--provenance false \ --provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/postgres-backup:$(call settag,$(POSTGRES_BACKUP_TAG)) \ --tag $(REGISTRY)/postgres-backup:$(call settag,$(POSTGRES_BACKUP_TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/postgres-backup:latest \ --cache-from type=registry,ref=$(REGISTRY)/postgres-backup:latest \
--cache-to type=inline \ --cache-to type=inline \


@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.12.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}} {{ include "resources.preset" (dict "type" "nano") -}}
*/}} */}}
{{- define "resources.preset" -}} {{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict {{- $presets := dict
"nano" (dict "nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
) )
"micro" (dict "micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
) )
"small" (dict "small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
) )
"medium" (dict "medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
) )
"large" (dict "large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
) )
"xlarge" (dict "xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
) )
"2xlarge" (dict "2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
) )
}} }}
{{- if hasKey $presets .type -}} {{- if hasKey $presets .type -}}


@@ -17,9 +17,6 @@ spec:
{{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }} {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
{{- if $rawConstraints }} {{- if $rawConstraints }}
{{- $rawConstraints | fromYaml | toYaml | nindent 2 }} {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
labelSelector:
matchLabels:
cnpg.io/cluster: {{ .Release.Name }}
{{- end }} {{- end }}
{{- end }} {{- end }}
postgresql: postgresql:
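
In the left-hand variant, whatever constraints arrive through the `globalAppTopologySpreadConstraints` key get a `labelSelector` appended, so the spread rule only counts pods belonging to this CNPG cluster. Assuming that key holds the single constraint below and the release is named `mypg` (both assumptions), the rendered block could look like:

```yaml
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: kubernetes.io/hostname
    whenUnsatisfiable: ScheduleAnyway
    labelSelector:
      matchLabels:
        cnpg.io/cluster: mypg
```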


@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.0 version: 0.5.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}} {{ include "resources.preset" (dict "type" "nano") -}}
*/}} */}}
{{- define "resources.preset" -}} {{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict {{- $presets := dict
"nano" (dict "nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
) )
"micro" (dict "micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
) )
"small" (dict "small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
) )
"medium" (dict "medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
) )
"large" (dict "large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
) )
"xlarge" (dict "xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
) )
"2xlarge" (dict "2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
) )
}} }}
{{- if hasKey $presets .type -}} {{- if hasKey $presets .type -}}


@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0 version: 0.6.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}} {{ include "resources.preset" (dict "type" "nano") -}}
*/}} */}}
{{- define "resources.preset" -}} {{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict {{- $presets := dict
"nano" (dict "nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
) )
"micro" (dict "micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
) )
"small" (dict "small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
) )
"medium" (dict "medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
) )
"large" (dict "large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
) )
"xlarge" (dict "xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
) )
"2xlarge" (dict "2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
) )
}} }}
{{- if hasKey $presets .type -}} {{- if hasKey $presets .type -}}


@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0 version: 0.3.0
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to


@@ -11,34 +11,35 @@ These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}} {{ include "resources.preset" (dict "type" "nano") -}}
*/}} */}}
{{- define "resources.preset" -}} {{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict {{- $presets := dict
"nano" (dict "nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
) )
"micro" (dict "micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
) )
"small" (dict "small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
) )
"medium" (dict "medium" (dict
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
) )
"large" (dict "large" (dict
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
) )
"xlarge" (dict "xlarge" (dict
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
) )
"2xlarge" (dict "2xlarge" (dict
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi") "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi") "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
) )
}} }}
{{- if hasKey $presets .type -}} {{- if hasKey $presets .type -}}


@@ -4,55 +4,36 @@ A tenant is the main unit of security on the platform. The closest analogy would
Tenants can be created recursively and are subject to the following rules: Tenants can be created recursively and are subject to the following rules:
### Tenant naming ### Higher-level tenants can access lower-level ones.
Tenant names must be alphanumeric. Higher-level tenants can view and manage the applications of all their children.
Using dashes (`-`) in tenant names is not allowed, unlike with other services.
This limitation exists to keep consistent naming in tenants, nested tenants, and services deployed in them.
For example: ### Each tenant has its own domain
- The root tenant is named `root`, but internally it's referenced as `tenant-root`. By default (unless otherwise specified), it inherits the domain of its parent with a prefix of its name. For example, if the parent had the domain `example.org`, then `tenant-foo` would get the domain `foo.example.org` by default.
- A nested tenant could be named `foo`, which would result in `tenant-foo` in service names and URLs.
- However, a tenant cannot be named `foo-bar`, because parsing names such as `tenant-foo-bar` would be ambiguous.
### Unique domains
Each tenant has its own domain.
By default, (unless otherwise specified), it inherits the domain of its parent with a prefix of its name.
For example, if the parent had the domain `example.org`, then `tenant-foo` would get the domain `foo.example.org` by default.
Kubernetes clusters created in this tenant namespace would get domains like: `kubernetes-cluster.foo.example.org` Kubernetes clusters created in this tenant namespace would get domains like: `kubernetes-cluster.foo.example.org`
Example: Example:
```text ```
tenant-root (example.org) tenant-root (example.org)
└── tenant-foo (foo.example.org) └── tenant-foo (foo.example.org)
└── kubernetes-cluster1 (kubernetes-cluster1.foo.example.org) └── kubernetes-cluster1 (kubernetes-cluster1.foo.example.org)
``` ```
### Nesting tenants and reusing parent services ### Lower-level tenants can access the cluster services of their parent (provided they do not run their own)
Tenants can be nested. Thus, you can create `tenant-u1` with a set of services like `etcd`, `ingress`, and `monitoring`, and create another tenant namespace `tenant-u2` inside `tenant-u1`.
A tenant administrator can create nested tenants using the "Tenant" application from the catalogue.
Higher-level tenants can view and manage the applications of all their children tenants.
If a tenant does not run its own cluster services, it can access those of its parent.
For example, you create:
- Tenant `tenant-u1` with a set of services like `etcd`, `ingress`, `monitoring`.
- Tenant `tenant-u2` nested in `tenant-u1`.
Let's see what will happen when you run Kubernetes and Postgres in the `tenant-u2` namespace. Let's see what will happen when you run Kubernetes and Postgres in the `tenant-u2` namespace.
Since `tenant-u2` does not have its own cluster services like `etcd`, `ingress`, and `monitoring`, Since `tenant-u2` does not have its own cluster services like `etcd`, `ingress`, and `monitoring`, the applications will use the cluster services of the parent tenant.
the applications running in `tenant-u2` will use the cluster services of the parent tenant.
This in turn means: This in turn means:
- The Kubernetes cluster data will be stored in `etcd` for `tenant-u1`. - The Kubernetes cluster data will be stored in etcd for `tenant-u1`.
- Access to the cluster will be through the common `ingress` of `tenant-u1`. - Access to the cluster will be through the common ingress of `tenant-u1`.
- Essentially, all metrics will be collected in the `monitoring` service of `tenant-u1`, and only that tenant will have access to them. - Essentially, all metrics will be collected in the monitoring service of `tenant-u1`, and only it will have access to them.
Example: Example:
``` ```


@@ -8,8 +8,7 @@ clickhouse 0.5.0 0f312d5c
clickhouse 0.6.0 1ec10165 clickhouse 0.6.0 1ec10165
clickhouse 0.6.1 c62a83a7 clickhouse 0.6.1 c62a83a7
clickhouse 0.6.2 8267072d clickhouse 0.6.2 8267072d
clickhouse 0.7.0 93bdf411 clickhouse 0.7.0 HEAD
clickhouse 0.9.0 HEAD
ferretdb 0.1.0 e9716091 ferretdb 0.1.0 e9716091
ferretdb 0.1.1 91b0499a ferretdb 0.1.1 91b0499a
ferretdb 0.2.0 6c5cf5bf ferretdb 0.2.0 6c5cf5bf
@@ -17,14 +16,12 @@ ferretdb 0.3.0 b8e33d19
ferretdb 0.4.0 b40e1b09 ferretdb 0.4.0 b40e1b09
ferretdb 0.4.1 1ec10165 ferretdb 0.4.1 1ec10165
ferretdb 0.4.2 8267072d ferretdb 0.4.2 8267072d
ferretdb 0.5.0 93bdf411 ferretdb 0.5.0 HEAD
ferretdb 0.6.0 HEAD
http-cache 0.1.0 263e47be http-cache 0.1.0 263e47be
http-cache 0.2.0 53f2365e http-cache 0.2.0 53f2365e
http-cache 0.3.0 6c5cf5bf http-cache 0.3.0 6c5cf5bf
http-cache 0.3.1 0f312d5c http-cache 0.3.1 0f312d5c
http-cache 0.4.0 93bdf411 http-cache 0.4.0 HEAD
http-cache 0.5.0 HEAD
kafka 0.1.0 f7eaab0a kafka 0.1.0 f7eaab0a
kafka 0.2.0 c0685f43 kafka 0.2.0 c0685f43
kafka 0.2.1 dfbc210b kafka 0.2.1 dfbc210b
@@ -35,8 +32,7 @@ kafka 0.3.1 c62a83a7
kafka 0.3.2 93c46161 kafka 0.3.2 93c46161
kafka 0.3.3 8267072d kafka 0.3.3 8267072d
kafka 0.4.0 85ec09b8 kafka 0.4.0 85ec09b8
kafka 0.5.0 93bdf411 kafka 0.5.0 HEAD
kafka 0.6.0 HEAD
kubernetes 0.1.0 263e47be kubernetes 0.1.0 263e47be
kubernetes 0.2.0 53f2365e kubernetes 0.2.0 53f2365e
kubernetes 0.3.0 007d414f kubernetes 0.3.0 007d414f
@@ -63,10 +59,7 @@ kubernetes 0.16.0 077045b0
kubernetes 0.17.0 1fbbfcd0 kubernetes 0.17.0 1fbbfcd0
kubernetes 0.17.1 fd240701 kubernetes 0.17.1 fd240701
kubernetes 0.18.0 721c12a7 kubernetes 0.18.0 721c12a7
kubernetes 0.19.0 93bdf411 kubernetes 0.19.0 HEAD
kubernetes 0.20.0 609e7ede
kubernetes 0.20.1 f9f8bb2f
kubernetes 0.21.0 HEAD
mysql 0.1.0 263e47be mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e mysql 0.3.0 53f2365e
@@ -75,16 +68,14 @@ mysql 0.5.0 b40e1b09
mysql 0.5.1 0f312d5c mysql 0.5.1 0f312d5c
mysql 0.5.2 1ec10165 mysql 0.5.2 1ec10165
mysql 0.5.3 8267072d mysql 0.5.3 8267072d
mysql 0.6.0 93bdf411 mysql 0.6.0 HEAD
mysql 0.7.0 HEAD
nats 0.1.0 e9716091 nats 0.1.0 e9716091
nats 0.2.0 6c5cf5bf nats 0.2.0 6c5cf5bf
nats 0.3.0 78366f19 nats 0.3.0 78366f19
nats 0.3.1 c62a83a7 nats 0.3.1 c62a83a7
nats 0.4.0 898374b5 nats 0.4.0 898374b5
nats 0.4.1 8267072d nats 0.4.1 8267072d
nats 0.5.0 93bdf411 nats 0.5.0 HEAD
nats 0.6.0 HEAD
postgres 0.1.0 263e47be postgres 0.1.0 263e47be
postgres 0.2.0 53f2365e postgres 0.2.0 53f2365e
postgres 0.2.1 d7cfa53c postgres 0.2.1 d7cfa53c
@@ -99,9 +90,7 @@ postgres 0.7.1 1ec10165
postgres 0.8.0 4e68e65c postgres 0.8.0 4e68e65c
postgres 0.9.0 8267072d postgres 0.9.0 8267072d
postgres 0.10.0 721c12a7 postgres 0.10.0 721c12a7
postgres 0.10.1 93bdf411 postgres 0.10.1 HEAD
postgres 0.11.0 f9f8bb2f
postgres 0.12.0 HEAD
rabbitmq 0.1.0 263e47be rabbitmq 0.1.0 263e47be
rabbitmq 0.2.0 53f2365e rabbitmq 0.2.0 53f2365e
rabbitmq 0.3.0 6c5cf5bf rabbitmq 0.3.0 6c5cf5bf
@@ -110,20 +99,17 @@ rabbitmq 0.4.1 1128d0cb
rabbitmq 0.4.2 4b90bf5a rabbitmq 0.4.2 4b90bf5a
rabbitmq 0.4.3 1ec10165 rabbitmq 0.4.3 1ec10165
rabbitmq 0.4.4 8267072d rabbitmq 0.4.4 8267072d
rabbitmq 0.5.0 93bdf411 rabbitmq 0.5.0 HEAD
rabbitmq 0.6.0 HEAD
redis 0.1.1 263e47be redis 0.1.1 263e47be
redis 0.2.0 53f2365e redis 0.2.0 53f2365e
redis 0.3.0 6c5cf5bf redis 0.3.0 6c5cf5bf
redis 0.3.1 c62a83a7 redis 0.3.1 c62a83a7
redis 0.4.0 84f3ccc0 redis 0.4.0 84f3ccc0
redis 0.5.0 4e68e65c redis 0.5.0 4e68e65c
redis 0.6.0 93bdf411 redis 0.6.0 HEAD
redis 0.7.0 HEAD
tcp-balancer 0.1.0 263e47be tcp-balancer 0.1.0 263e47be
tcp-balancer 0.2.0 53f2365e tcp-balancer 0.2.0 53f2365e
tcp-balancer 0.3.0 93bdf411 tcp-balancer 0.3.0 HEAD
tcp-balancer 0.4.0 HEAD
tenant 0.1.4 afc997ef tenant 0.1.4 afc997ef
tenant 0.1.5 e3ab858a tenant 0.1.5 e3ab858a
tenant 1.0.0 263e47be tenant 1.0.0 263e47be
@@ -160,8 +146,7 @@ virtual-machine 0.8.0 3fa4dd3a
virtual-machine 0.8.1 93c46161 virtual-machine 0.8.1 93c46161
virtual-machine 0.8.2 de19450f virtual-machine 0.8.2 de19450f
virtual-machine 0.9.0 721c12a7 virtual-machine 0.9.0 721c12a7
virtual-machine 0.9.1 93bdf411 virtual-machine 0.9.1 HEAD
virtual-machine 0.10.0 HEAD
vm-disk 0.1.0 d971f2ff vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 HEAD vm-disk 0.1.1 HEAD
vm-instance 0.1.0 1ec10165 vm-instance 0.1.0 1ec10165
@@ -172,10 +157,9 @@ vm-instance 0.4.1 0ab39f20
vm-instance 0.5.0 3fa4dd3a vm-instance 0.5.0 3fa4dd3a
vm-instance 0.5.1 de19450f vm-instance 0.5.1 de19450f
vm-instance 0.6.0 721c12a7 vm-instance 0.6.0 721c12a7
vm-instance 0.7.0 HEAD vm-instance 0.6.1 HEAD
vpn 0.1.0 263e47be vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf vpn 0.3.0 6c5cf5bf
vpn 0.3.1 1ec10165 vpn 0.3.1 1ec10165
vpn 0.4.0 93bdf411 vpn 0.4.0 HEAD
vpn 0.5.0 HEAD


@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.10.0 version: 0.9.1
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using. # follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes. # It is recommended to use it with quotes.
appVersion: 0.10.0 appVersion: 0.9.0

Some files were not shown because too many files have changed in this diff.