mirror of https://github.com/outbackdingo/cozystack.git
synced 2026-01-28 18:18:41 +00:00

Compare commits: v0.31.0-rc ... v0.32.0-be (77 commits)

| Author | SHA1 | Date | |
|---|---|---|---|
| | 9555386bd7 | | |
| | 32adf5ab38 | | |
| | 28302e776e | | |
| | 045ea76539 | | |
| | cee820e82c | | |
| | 6183b715b7 | | |
| | 2669ab6072 | | |
| | 96506c7cce | | |
| | 7bb70c839e | | |
| | ba97a4593c | | |
| | c467ed798a | | |
| | ed881f0741 | | |
| | 0e0dabdd08 | | |
| | bd8f8bde95 | | |
| | 646dab497c | | |
| | dc3b61d164 | | |
| | 4479a038cd | | |
| | dfd01ff118 | | |
| | d2bb66db31 | | |
| | 7af97e2d9f | | |
| | ac5145be87 | | |
| | 4779db2dda | | |
| | 25c2774bc8 | | |
| | bbee8103eb | | |
| | 730ea4d5ef | | |
| | 13fccdc465 | | |
| | f1b66c80f6 | | |
| | f34f140d49 | | |
| | 520fbfb2e4 | | |
| | 25016580c1 | | |
| | f10f8455fc | | |
| | 974581d39b | | |
| | 7e24297913 | | |
| | b6142cd4f5 | | |
| | e87994c769 | | |
| | b140f1b57f | | |
| | 64936021d2 | | |
| | a887e19e6c | | |
| | 92b97a569e | | |
| | 0e22358b30 | | |
| | 7429daf99c | | |
| | b470b82e2a | | |
| | a0700e7399 | | |
| | 228e1983bc | | |
| | 7023abdba7 | | |
| | 1b43a5f160 | | |
| | 20f4066c16 | | |
| | ea0dd68e84 | | |
| | e0c3d2324f | | |
| | cb303d694c | | |
| | 6130f43d06 | | |
| | 4db55ac5eb | | |
| | bfd20a5e0e | | |
| | 977141bed3 | | |
| | c4f8d6a251 | | |
| | 9633ca4d25 | | |
| | f798cbd9f9 | | |
| | cf87779f7b | | |
| | c69135e0e5 | | |
| | a9c3a4c601 | | |
| | d1081c86b3 | | |
| | beadc80778 | | |
| | 5bbb5a6266 | | |
| | 0664370218 | | |
| | 225d103509 | | |
| | 0e22e3c12c | | |
| | 7b8e7e40ce | | |
| | c941e487fb | | |
| | 8386e985f2 | | |
| | e4c944488f | | |
| | 99a7754c00 | | |
| | 6cbfab9b2a | | |
| | 461f756c88 | | |
| | 50932ba49e | | |
| | 74e7e5cdfb | | |
| | 2bf4032d5b | | |
| | fc8b52d73d | | |
.github/workflows/pull-requests-release.yaml (vendored): 59 changed lines
@@ -16,7 +16,6 @@ jobs:
      contents: read
      packages: write

    # Run only when the PR carries the "release" label and not closed.
    if: |
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'
@@ -35,6 +34,64 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Extract tag from PR branch
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }
            const tag = `v${m[1]}`;
            core.setOutput('tag', tag);

      - name: Find draft release and get asset IDs
        id: fetch_assets
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GH_PAT }}
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo,
              per_page: 100
            });
            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!draft) {
              core.setFailed(`Draft release '${tag}' not found`);
              return;
            }
            const findAssetId = (name) =>
              draft.assets.find(a => a.name === name)?.id;
            const installerId = findAssetId("cozystack-installer.yaml");
            const diskId = findAssetId("nocloud-amd64.raw.xz");
            if (!installerId || !diskId) {
              core.setFailed("Missing required assets");
              return;
            }
            core.setOutput("installer_id", installerId);
            core.setOutput("disk_id", diskId);

      - name: Download assets from GitHub API
        run: |
          mkdir -p _out/assets
          curl -sSL \
            -H "Authorization: token ${GH_PAT}" \
            -H "Accept: application/octet-stream" \
            -o _out/assets/cozystack-installer.yaml \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.installer_id }}"
          curl -sSL \
            -H "Authorization: token ${GH_PAT}" \
            -H "Accept: application/octet-stream" \
            -o _out/assets/nocloud-amd64.raw.xz \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ steps.fetch_assets.outputs.disk_id }}"
        env:
          GH_PAT: ${{ secrets.GH_PAT }}

      - name: Run tests
        run: make test

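The "Extract tag from PR branch" step above accepts only branches named `release-X.Y.Z` with an optional suffix and maps them to a `vX.Y.Z[-suffix]` tag. A rough shell equivalent of that mapping, for illustration only (the workflow itself does this in JavaScript via `actions/github-script`, and the branch name here is a hypothetical example):

```
# Sketch of the branch-to-tag mapping performed by the workflow step above.
branch="release-0.31.0-rc.3"
case "$branch" in
  release-[0-9]*.[0-9]*.[0-9]*)
    tag="v${branch#release-}"   # -> v0.31.0-rc.3
    echo "tag=$tag"
    ;;
  *)
    echo "Branch '$branch' does not match 'release-X.Y.Z[-suffix]'" >&2
    exit 1
    ;;
esac
```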
.github/workflows/pull-requests.yaml (vendored): 45 changed lines
@@ -9,8 +9,8 @@ concurrency:
  cancel-in-progress: true

jobs:
  e2e:
    name: Build and Test
  build:
    name: Build
    runs-on: [self-hosted]
    permissions:
      contents: read
@@ -33,9 +33,50 @@ jobs:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      - name: Build
        run: make build
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      - name: Build Talos image
        run: make -C packages/core/installer talos-nocloud

      - name: Upload installer
        uses: actions/upload-artifact@v4
        with:
          name: cozystack-installer
          path: _out/assets/cozystack-installer.yaml

      - name: Upload Talos image
        uses: actions/upload-artifact@v4
        with:
          name: talos-image
          path: _out/assets/nocloud-amd64.raw.xz

  test:
    name: Test
    runs-on: [self-hosted]
    needs: build

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Download installer
        uses: actions/download-artifact@v4
        with:
          name: cozystack-installer
          path: _out/assets/

      - name: Download Talos image
        uses: actions/download-artifact@v4
        with:
          name: talos-image
          path: _out/assets/

      - name: Test
        run: make test

.github/workflows/tags.yaml (vendored): 4 changed lines
@@ -99,11 +99,15 @@ jobs:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      # Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      # Commit built artifacts
      - name: Commit release artifacts

Makefile: 4 changed lines
@@ -29,10 +29,8 @@ build: build-deps

repos:
	rm -rf _out
	make -C packages/library check-version-map
	make -C packages/apps check-version-map
	make -C packages/extra check-version-map
	make -C packages/library repo
	make -C packages/system repo
	make -C packages/apps repo
	make -C packages/extra repo
@@ -45,7 +43,7 @@ manifests:
	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml

assets:
	make -C packages/core/installer/ assets
	make -C packages/core/installer assets

test:
	make -C packages/core/testing apply

@@ -194,7 +194,15 @@ func main() {
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "Workload")
		setupLog.Error(err, "unable to create controller", "controller", "TenantHelmReconciler")
		os.Exit(1)
	}

	if err = (&controller.CozystackConfigReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
		os.Exit(1)
	}


@@ -1,39 +1,129 @@
This is the third release candidate for the upcoming Cozystack v0.31.0 release.
The release notes show changes accumulated since the release of the previous version, Cozystack v0.30.0.
Cozystack v0.31.0 is a significant release that brings new features, key fixes, and updates to underlying components.
This version enhances GPU support, improves many components of Cozystack, and introduces a more robust release process to improve stability.
Below, we'll go over the highlights in each area for current users, developers, and our community.

Cozystack 0.31.0 further advances GPU support, monitoring, and all-around convenience features.
## Major Features and Improvements

## New Features and Changes
### GPU support for tenant Kubernetes clusters

Cozystack now integrates NVIDIA GPU Operator support for tenant Kubernetes clusters.
This enables platform users to run GPU-powered AI/ML applications in their own clusters.
To enable the GPU Operator, set `addons.gpuOperator.enabled: true` in the cluster configuration (see the example below).
(@kvaps in https://github.com/cozystack/cozystack/pull/834)

Check out Andrei Kvapil's CNCF webinar [showcasing the GPU support by running Stable Diffusion in Cozystack](https://www.youtube.com/watch?v=S__h_QaoYEk).

<!--
* [kubernetes] Introduce GPU support for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/834)
-->

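A minimal sketch of that setting, modeled on the `apps.cozystack.io/v1alpha1` `Kubernetes` objects used in `hack/e2e-apps.bats` later in this changeset. The tenant namespace and cluster name are placeholders, and a real cluster definition also carries the other `addons`, `controlPlane`, and `nodeGroups` fields shown there:

```
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test                # placeholder cluster name
  namespace: tenant-test    # placeholder tenant namespace
spec:
  addons:
    gpuOperator:
      enabled: true         # turns on the NVIDIA GPU Operator addon
      valuesOverride: {}
EOF
```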
### Cilium Improvements

Cozystack’s Cilium integration received two significant enhancements.
First, Gateway API support in Cilium is now enabled, allowing advanced L4/L7 routing features via the Kubernetes Gateway API.
We thank Zdenek Janda @zdenekjanda for contributing this feature in https://github.com/cozystack/cozystack/pull/924.

Second, Cozystack now permits custom user-provided parameters in the tenant cluster’s Cilium configuration (see the sketch below).
(@lllamnyp in https://github.com/cozystack/cozystack/pull/917)

<!--
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
-->

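A sketch of the second change, using the same `Kubernetes` object: custom Cilium parameters are passed through `addons.cilium.valuesOverride` (the field appears in `hack/e2e-apps.bats`). The specific value shown here, `hubble.enabled`, is only a hypothetical example of a user-provided override; names and namespaces are placeholders:

```
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test                # placeholder
  namespace: tenant-test    # placeholder
spec:
  addons:
    cilium:
      valuesOverride:       # user-provided Cilium parameters; hubble.enabled is a hypothetical example
        hubble:
          enabled: true
EOF
```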
### Cross-Architecture Builds (ARM Support Beta)

Cozystack's build system was refactored to support multi-architecture binaries and container images.
This paves the way to running Cozystack on ARM64 servers.
Changes include Makefile improvements (https://github.com/cozystack/cozystack/pull/907)
and multi-arch Docker image builds (https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970).

We thank Nikita Bykov @nbykov0 for his ongoing work on ARM support!

<!--
* Introduce support for cross-architecture builds and Cozystack on ARM:
  * [build] Refactor Makefiles introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
  * [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
-->

### VerticalPodAutoscaler (VPA) Expansion

The VerticalPodAutoscaler is now enabled for more Cozystack components to automate resource tuning.
Specifically, VPA was added for tenant Kubernetes control planes (@klinch0 in https://github.com/cozystack/cozystack/pull/806),
the Cozystack Dashboard (https://github.com/cozystack/cozystack/pull/828),
and the Cozystack etcd-operator (https://github.com/cozystack/cozystack/pull/850).
All Cozystack components that have VPA enabled can automatically adjust their CPU and memory requests based on usage, improving platform and application stability.

<!--
* Add VerticalPodAutoscaler to a few more components:
  * [kubernetes] Kubernetes clusters in user tenants. (@klinch0 in https://github.com/cozystack/cozystack/pull/806)
  * [platform] Cozystack dashboard. (@klinch0 in https://github.com/cozystack/cozystack/pull/828)
  * [platform] Cozystack etcd-operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/850)
* Introduce support for cross-architecture builds and Cozystack on ARM:
  * [build] Refactor Makefiles introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
  * [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
-->

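For background, this is the general shape of a VerticalPodAutoscaler object. It is a generic sketch of the upstream `autoscaling.k8s.io` API, not a Cozystack-specific manifest; the names and namespace are placeholders, and Cozystack wires up equivalents of this for the components listed above:

```
kubectl apply -f - <<EOF
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: example-vpa          # placeholder
  namespace: example         # placeholder
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-deployment # placeholder workload to autoscale
  updatePolicy:
    updateMode: "Auto"       # let VPA apply recommended CPU/memory requests
EOF
```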
### Tenant HelmRelease Reconcile Controller

A new controller was introduced to monitor and synchronize HelmRelease resources across tenants.
This controller propagates configuration changes to tenant workloads and ensures that any HelmRelease defined in a tenant
stays in sync with platform updates.
It improves the reliability of deploying managed applications in Cozystack.
(@klinch0 in https://github.com/cozystack/cozystack/pull/870)

<!--
* [platform] Introduce a new controller to synchronize tenant HelmReleases and propagate configuration changes. (@klinch0 in https://github.com/cozystack/cozystack/pull/870)
* [platform] Introduce options `expose-services`, `expose-ingress` and `expose-external-ips` to the ingress service. (@kvaps in https://github.com/cozystack/cozystack/pull/929)
-->

### Virtual Machine Improvements

**Configurable KubeVirt CPU Overcommit**: The CPU allocation ratio in KubeVirt (how virtual CPUs are overcommitted relative to physical CPUs) is now configurable
via the `cpu-allocation-ratio` value in the Cozystack configmap (see the examples below).
This means Cozystack administrators can now tune CPU overcommitment for VMs to balance performance vs. density.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/905)

**KubeVirt VM Export**: Cozystack now allows exporting KubeVirt virtual machines.
This feature, enabled via KubeVirt's `VirtualMachineExport` capability, lets users snapshot or back up VM images.
(@kvaps in https://github.com/cozystack/cozystack/pull/808)

**Support for various storage classes in Virtual Machines**: The `virtual-machine` application (since version 0.9.2) lets you pick any StorageClass for a VM's
system disk instead of relying on a hard-coded PVC.
Refer to the values `systemDisk.storage` and `systemDisk.storageClass` in the [application's configs](https://cozystack.io/docs/reference/applications/virtual-machine/#common-parameters).
(@kvaps in https://github.com/cozystack/cozystack/pull/974)

<!--
* [kubevirt] Enable exporting VMs. (@kvaps in https://github.com/cozystack/cozystack/pull/808)
* [kubevirt] Make KubeVirt's CPU allocation ratio configurable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
* [virtual-machine] Add support for various storages. (@kvaps in https://github.com/cozystack/cozystack/pull/974)
-->

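Two quick sketches of these knobs. The configmap name and namespace (`cozystack` in `cozy-system`) and the `kubectl patch` idiom follow the e2e scripts in this changeset; the ratio value `10` and the disk size are placeholders, and the `systemDisk` fragment is a hypothetical values snippet for the `virtual-machine` application:

```
# Tune the KubeVirt CPU overcommit ratio (placeholder value "10").
kubectl patch configmap/cozystack -n cozy-system \
  --type merge -p '{"data":{"cpu-allocation-ratio":"10"}}'

# virtual-machine (>= 0.9.2) values fragment (hypothetical):
#   systemDisk:
#     storage: 30Gi             # placeholder size
#     storageClass: replicated  # any StorageClass available in the tenant
```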
### Other Features and Improvements

* [platform] Introduce options `expose-services`, `expose-ingress`, and `expose-external-ips` to the ingress service (see the example after this list). (@kvaps in https://github.com/cozystack/cozystack/pull/929)
* [cozystack-controller] Record the IP address pool and storage class in Workload objects. (@lllamnyp in https://github.com/cozystack/cozystack/pull/831)
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
* [apps] Remove user-facing config of limits and requests. (@lllamnyp in https://github.com/cozystack/cozystack/pull/935)
* Update the Cozystack release policy to include long-lived release branches and start with release candidates. Update CI workflows and docs accordingly.
  * Use release branches `release-X.Y` for gathering and releasing fixes after the initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
  * Automatically create release branches after the initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
  * Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
  * Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
  * Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
  * Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)

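The `expose-services` option from the first bullet is driven by the `cozystack` configmap. This exact command appears in `hack/e2e-cluster.bats` later in this changeset:

```
# Expose selected Cozystack services through the root tenant's ingress.
kubectl patch configmap/cozystack -n cozy-system --type merge \
  -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
```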
## New Release Lifecycle

The Cozystack release lifecycle is changing to provide a more stable and predictable release process for customers running Cozystack in mission-critical environments.

* **Gradual Release with Alpha, Beta, and Release Candidates**: Cozystack will now publish pre-release versions (alpha, beta, release candidates) before a stable release.
  Starting with v0.31.0, the team published three release candidates before the stable release.
  This allows more testing and feedback before marking a release as stable.

* **Prolonged Release Support with Patch Versions**: After the initial `vX.Y.0` release, a long-lived branch `release-X.Y` will be created to backport fixes.
  For example, with 0.31.0's release, a `release-0.31` branch will track patch fixes (`0.31.x`).
  This strategy lets Cozystack users receive timely patch releases and updates with minimal risk.

To implement these changes, we have rebuilt our CI/CD workflows and introduced automation enabling automatic backports.
You can read more about how it's implemented in the Development section below.

For more information, read the [Cozystack Release Workflow](https://github.com/cozystack/cozystack/blob/main/docs/release.md) documentation.

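A small sketch of what this naming scheme looks like in practice. The version sequence in the comments follows the policy described above; the commands only list published refs and are examples, not part of the release tooling:

```
# Hypothetical version sequence under the new lifecycle:
#   v0.31.0-rc.1 -> v0.31.0-rc.2 -> v0.31.0-rc.3 -> v0.31.0 (stable)
#   then the release-0.31 branch collects backports and produces v0.31.1, v0.31.2, ...
git ls-remote --tags  https://github.com/cozystack/cozystack.git 'v0.31.*'    # list published v0.31.x tags
git ls-remote --heads https://github.com/cozystack/cozystack.git 'release-*'  # list long-lived release branches
```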
## Fixes

* [virtual-machine] Add GPU names to the virtual machine specifications. (@kvaps in https://github.com/cozystack/cozystack/pull/862)
* [virtual-machine] Count Workload resources for pods by requests, not limits. Other improvements to VM resource tracking. (@lllamnyp in https://github.com/cozystack/cozystack/pull/904)
* [virtual-machine] Set PortList method by default. (@kvaps in https://github.com/cozystack/cozystack/pull/996)
* [virtual-machine] Specify ports even for wholeIP mode. (@kvaps in https://github.com/cozystack/cozystack/pull/1000)
* [platform] Fix installing HelmReleases on initial setup. (@kvaps in https://github.com/cozystack/cozystack/pull/833)
* [platform] Migration scripts update the Kubernetes ConfigMap with the current stack version for improved version tracking. (@klinch0 in https://github.com/cozystack/cozystack/pull/840)
* [platform] Reduce requested CPU and RAM for the `kamaji` provider. (@klinch0 in https://github.com/cozystack/cozystack/pull/825)
@@ -45,7 +135,8 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)
* [kubernetes] Fix `ubuntu-container-disk` tag. (@kvaps in https://github.com/cozystack/cozystack/pull/887)
* [kubernetes] Refactor Helm manifests for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/866)
* [kubernetes] Fix Ingress-NGINX dependency on Cert-Manager. (@kvaps in https://github.com/cozystack/cozystack/pull/976)
* [kubernetes, apps] Enable `topologySpreadConstraints` for tenant Kubernetes clusters and fix it for managed PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/995)
* [tenant] Fix an issue with accessing external IPs of a cluster from the cluster itself. (@kvaps in https://github.com/cozystack/cozystack/pull/854)
* [cluster-api] Remove the no longer necessary workaround for Kamaji. (@kvaps in https://github.com/cozystack/cozystack/pull/867, patched in https://github.com/cozystack/cozystack/pull/956)
* [monitoring] Remove the legacy label "POD" from the exclude filter in metrics. (@xy2 in https://github.com/cozystack/cozystack/pull/826)
@@ -54,24 +145,13 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* [postgres] Remove duplicated `template` entry from the backup manifest. (@etoshutka in https://github.com/cozystack/cozystack/pull/872)
* [kube-ovn] Fix versions mapping in Makefile. (@kvaps in https://github.com/cozystack/cozystack/pull/883)
* [dx] Automatically detect the version for migrations in installer.sh. (@kvaps in https://github.com/cozystack/cozystack/pull/837)
* [e2e] Increase timeout durations for `capi` and `keycloak` to improve reliability during environment setup. (@kvaps in https://github.com/cozystack/cozystack/pull/858)
* [e2e] Fix `device_ownership_from_security_context` CRI. (@dtrdnk in https://github.com/cozystack/cozystack/pull/896)
* [e2e] Return `genisoimage` to the e2e-test Dockerfile. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/962)
* [ci] Improve the check for `versions_map` running on pull requests. (@kvaps and @klinch0 in https://github.com/cozystack/cozystack/pull/836, https://github.com/cozystack/cozystack/pull/842, and https://github.com/cozystack/cozystack/pull/845)
* [ci] If the release step was skipped on a tag, skip tests as well. (@kvaps in https://github.com/cozystack/cozystack/pull/822)
* [ci] Allow CI to cancel the previous job if a new one is scheduled. (@kvaps in https://github.com/cozystack/cozystack/pull/873)
* [ci] Use the correct version name when uploading build assets to the release page. (@kvaps in https://github.com/cozystack/cozystack/pull/876)
* [ci] Stop using the `ok-to-test` label to trigger CI in pull requests. (@kvaps in https://github.com/cozystack/cozystack/pull/875)
* [ci] Do not run tests in the release building pipeline. (@kvaps in https://github.com/cozystack/cozystack/pull/882)
* [ci] Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* [ci, dx] Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* [ci] Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* [ci] Force-update the release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* [dx] Remove version_map and building for library charts. (@kvaps in https://github.com/cozystack/cozystack/pull/998)
* [docs] Review the tenant Kubernetes cluster docs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/969)
* [docs] Explain that tenants cannot have dashes in their names. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/980)

## Dependencies

* MetalLB is now included directly as a patched image based on version 0.14.9. (@lllamnyp in https://github.com/cozystack/cozystack/pull/945)
* MetalLB images are now built in-tree based on version 0.14.9 with additional critical patches. (@lllamnyp in https://github.com/cozystack/cozystack/pull/945)
* Update Kubernetes to v1.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/949)
* Update Talos Linux to v1.10.1. (@kvaps in https://github.com/cozystack/cozystack/pull/931)
* Update Cilium to v1.17.3. (@kvaps in https://github.com/cozystack/cozystack/pull/848)
@@ -83,15 +163,81 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953, fixed by @nbykov0 in https://github.com/cozystack/cozystack/pull/983)
* Update cert-manager to v1.17.2. (@kvaps in https://github.com/cozystack/cozystack/pull/975)

## Maintenance
## Documentation

* Add @klinch0 to CODEOWNERS. (@kvaps in https://github.com/cozystack/cozystack/pull/838)
* [Installing Talos in Air-Gapped Environment](https://cozystack.io/docs/operations/talos/configuration/air-gapped/):
  new guide for configuring and bootstrapping Talos Linux clusters in air-gapped environments.
  (@klinch0 in https://github.com/cozystack/website/pull/203)

## New Contributors
* [Cozystack Bundles](https://cozystack.io/docs/guides/bundles/): new page in the learning section explaining how Cozystack bundles work and how to choose a bundle.
  (@NickVolynkin in https://github.com/cozystack/website/pull/188, https://github.com/cozystack/website/pull/189, and others;
  updated by @kvaps in https://github.com/cozystack/website/pull/192 and https://github.com/cozystack/website/pull/193)

* [Managed Application Reference](https://cozystack.io/docs/reference/applications/): a set of new pages in the docs, mirroring application docs from the Cozystack dashboard.
  (@NickVolynkin in https://github.com/cozystack/website/pull/198, https://github.com/cozystack/website/pull/202, and https://github.com/cozystack/website/pull/204)

* **LINSTOR Networking**: Guides on [configuring dedicated network for LINSTOR](https://cozystack.io/docs/operations/storage/dedicated-network/)
  and [configuring network for distributed storage in multi-datacenter setup](https://cozystack.io/docs/operations/stretched/linstor-dedicated-network/).
  (@xy2, edited by @NickVolynkin in https://github.com/cozystack/website/pull/171, https://github.com/cozystack/website/pull/182, and https://github.com/cozystack/website/pull/184)

### Fixes

* Correct an error in the doc for the command to edit the configmap. (@lb0o in https://github.com/cozystack/website/pull/207)
* Fix group name in OIDC docs. (@kingdonb in https://github.com/cozystack/website/pull/179)
* A bit more explanation of Docker buildx builders. (@nbykov0 in https://github.com/cozystack/website/pull/187)

## Development, Testing, and CI/CD

### Testing

Improvements:

* Introduce `cozytest` — a new [BATS-based](https://github.com/bats-core/bats-core) testing framework (usage example below). (@kvaps in https://github.com/cozystack/cozystack/pull/982)

Fixes:

* Fix `device_ownership_from_security_context` CRI. (@dtrdnk in https://github.com/cozystack/cozystack/pull/896)
* Increase timeout durations for `capi` and `keycloak` to improve reliability during e2e tests. (@kvaps in https://github.com/cozystack/cozystack/pull/858)
* Return `genisoimage` to the e2e-test Dockerfile. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/962)

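The new runner lives in `hack/cozytest.sh` (added below) and takes a `.bats` file plus an optional test-title pattern, matched as a substring, as its usage string shows. Assuming the e2e suites added in this changeset, an invocation looks like:

```
# Run a whole suite with the new runner:
hack/cozytest.sh hack/e2e-apps.bats

# Run only the tests whose titles contain a given substring:
hack/cozytest.sh hack/e2e-cluster.bats "Install Cozystack"
```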
### CI/CD Changes

Improvements:

* Use release branches `release-X.Y` for gathering and releasing fixes after the initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
* Automatically create release branches after the initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
* Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
* Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
* Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
* Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)

Fixes:

* Improve the check for `versions_map` running on pull requests. (@kvaps and @klinch0 in https://github.com/cozystack/cozystack/pull/836, https://github.com/cozystack/cozystack/pull/842, and https://github.com/cozystack/cozystack/pull/845)
* If the release step was skipped on a tag, skip tests as well. (@kvaps in https://github.com/cozystack/cozystack/pull/822)
* Allow CI to cancel the previous job if a new one is scheduled. (@kvaps in https://github.com/cozystack/cozystack/pull/873)
* Use the correct version name when uploading build assets to the release page. (@kvaps in https://github.com/cozystack/cozystack/pull/876)
* Stop using the `ok-to-test` label to trigger CI in pull requests. (@kvaps in https://github.com/cozystack/cozystack/pull/875)
* Do not run tests in the release building pipeline. (@kvaps in https://github.com/cozystack/cozystack/pull/882)
* Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* Force-update the release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* Show detailed errors in the `pull-request-release` workflow. (@lllamnyp in https://github.com/cozystack/cozystack/pull/992)

## Community and Maintenance

### Repository Maintenance

Added @klinch0 to CODEOWNERS. (@kvaps in https://github.com/cozystack/cozystack/pull/838)

### New Contributors

* @etoshutka made their first contribution in https://github.com/cozystack/cozystack/pull/872
* @dtrdnk made their first contribution in https://github.com/cozystack/cozystack/pull/896
* @zdenekjanda made their first contribution in https://github.com/cozystack/cozystack/pull/924
* @gwynbleidd2106 made their first contribution in https://github.com/cozystack/cozystack/pull/962

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0-rc.3
## Full Changelog

See https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0

hack/cozytest.sh (new executable file): 117 lines
@@ -0,0 +1,117 @@
|
||||
#!/bin/sh
|
||||
###############################################################################
|
||||
# cozytest.sh - Bats-compatible test runner with live trace and enhanced #
|
||||
# output, written in pure shell #
|
||||
###############################################################################
|
||||
set -eu
|
||||
|
||||
TEST_FILE=${1:?Usage: ./cozytest.sh <file.bats> [pattern]}
|
||||
PATTERN=${2:-*}
|
||||
LINE='----------------------------------------------------------------'
|
||||
|
||||
cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
|
||||
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
|
||||
BEGIN=$(date +%s)
|
||||
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }
|
||||
|
||||
###############################################################################
|
||||
# run_one <fn> <title> #
|
||||
###############################################################################
|
||||
run_one() {
|
||||
fn=$1 title=$2
|
||||
tmp=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
|
||||
log="$tmp/log"
|
||||
|
||||
echo "╭ » Run test: $title"
|
||||
START=$(date +%s)
|
||||
skip_next="+ $fn" # skip the first trace line, which is just the function name
|
||||
|
||||
{
|
||||
(
|
||||
PS4='+ ' # prefix for set -x
|
||||
set -eu -x # strict + trace
|
||||
"$fn"
|
||||
)
|
||||
printf '__RC__%s\n' "$?"
|
||||
} 2>&1 | tee "$log" | while IFS= read -r line; do
|
||||
case "$line" in
|
||||
'__RC__'*) : ;;
|
||||
'+ '*) cmd=${line#'+ '}
|
||||
[ "$cmd" = "${skip_next#+ }" ] && continue
|
||||
case "$cmd" in
|
||||
'set -e'|'set -x'|'set -u'|'return 0') continue ;;
|
||||
esac
|
||||
out=$cmd ;;
|
||||
*) out=$line ;;
|
||||
esac
|
||||
now=$(( $(date +%s) - START ))
|
||||
[ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
|
||||
printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
|
||||
done
|
||||
|
||||
rc=$(awk '/^__RC__/ {print substr($0,7)}' "$log" | tail -n1)
|
||||
[ -z "$rc" ] && rc=1
|
||||
now=$(( $(date +%s) - START ))
|
||||
|
||||
if [ "$rc" -eq 0 ]; then
|
||||
printf '╰[%02d:%02d] ✅ Test OK: %s\n' $((now/60)) $((now%60)) "$title"
|
||||
else
|
||||
printf '╰[%02d:%02d] ❌ Test failed: %s (exit %s)\n' \
|
||||
$((now/60)) $((now%60)) "$title" "$rc"
|
||||
echo "----- captured output -----------------------------------------"
|
||||
grep -v '^__RC__' "$log"
|
||||
echo "$LINE"
|
||||
exit "$rc"
|
||||
fi
|
||||
|
||||
rm -rf "$tmp"
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# convert .bats -> shell-functions #
|
||||
###############################################################################
|
||||
TMP_SH=$(mktemp) || { echo "Failed to create temp file" >&2; exit 1; }
|
||||
trap 'rm -f "$TMP_SH"' EXIT
|
||||
awk '
|
||||
/^@test[[:space:]]+"/ {
|
||||
line = substr($0, index($0, "\"") + 1)
|
||||
title = substr(line, 1, index(line, "\"") - 1)
|
||||
fname = "test_"
|
||||
for (i = 1; i <= length(title); i++) {
|
||||
c = substr(title, i, 1)
|
||||
fname = fname (c ~ /[A-Za-z0-9]/ ? c : "_")
|
||||
}
|
||||
printf("### %s\n", title)
|
||||
printf("%s() {\n", fname)
|
||||
print " set -e" # ошибка → падение теста
|
||||
next
|
||||
}
|
||||
/^}$/ {
|
||||
print " return 0" # если автор не сделал exit 1 — тест ОК
|
||||
print "}"
|
||||
next
|
||||
}
|
||||
{ print }
|
||||
' "$TEST_FILE" > "$TMP_SH"
|
||||
|
||||
[ -f "$TMP_SH" ] || { echo "Failed to generate test functions" >&2; exit 1; }
|
||||
# shellcheck disable=SC1090
|
||||
. "$TMP_SH"
|
||||
|
||||
###############################################################################
|
||||
# run selected tests #
|
||||
###############################################################################
|
||||
awk -v pat="$PATTERN" '
|
||||
/^### / {
|
||||
title = substr($0, 5)
|
||||
name = "test_"
|
||||
for (i = 1; i <= length(title); i++) {
|
||||
c = substr(title, i, 1)
|
||||
name = name (c ~ /[A-Za-z0-9]/ ? c : "_")
|
||||
}
|
||||
if (pat == "*" || index(title, pat) > 0)
|
||||
printf("%s %s\n", name, title)
|
||||
}
|
||||
' "$TMP_SH" | while IFS=' ' read -r fn title; do
|
||||
run_one "$fn" "$title"
|
||||
done
|
||||
hack/e2e-apps.bats (new executable file): 94 lines
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/env bats
|
||||
# -----------------------------------------------------------------------------
|
||||
# Cozystack end‑to‑end provisioning test (Bats)
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@test "Create tenant with isolated mode enabled" {
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: test
|
||||
namespace: tenant-root
|
||||
spec:
|
||||
etcd: false
|
||||
host: ""
|
||||
ingress: false
|
||||
isolated: true
|
||||
monitoring: false
|
||||
resourceQuotas: {}
|
||||
seaweedfs: false
|
||||
EOF
|
||||
kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
|
||||
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
|
||||
}
|
||||
|
||||
@test "Create a tenant Kubernetes control plane" {
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Kubernetes
|
||||
metadata:
|
||||
name: test
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
addons:
|
||||
certManager:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
cilium:
|
||||
valuesOverride: {}
|
||||
fluxcd:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
gatewayAPI:
|
||||
enabled: false
|
||||
gpuOperator:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
ingressNginx:
|
||||
enabled: true
|
||||
hosts: []
|
||||
valuesOverride: {}
|
||||
monitoringAgents:
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
verticalPodAutoscaler:
|
||||
valuesOverride: {}
|
||||
controlPlane:
|
||||
apiServer:
|
||||
resources: {}
|
||||
resourcesPreset: small
|
||||
controllerManager:
|
||||
resources: {}
|
||||
resourcesPreset: micro
|
||||
konnectivity:
|
||||
server:
|
||||
resources: {}
|
||||
resourcesPreset: micro
|
||||
replicas: 2
|
||||
scheduler:
|
||||
resources: {}
|
||||
resourcesPreset: micro
|
||||
host: ""
|
||||
nodeGroups:
|
||||
md0:
|
||||
ephemeralStorage: 20Gi
|
||||
gpus: []
|
||||
instanceType: u1.medium
|
||||
maxReplicas: 10
|
||||
minReplicas: 0
|
||||
resources:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
roles:
|
||||
- ingress-nginx
|
||||
storageClass: replicated
|
||||
EOF
|
||||
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
|
||||
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
|
||||
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
|
||||
kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
|
||||
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
|
||||
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
|
||||
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
|
||||
}
|
||||
hack/e2e-cluster.bats (new executable file): 391 lines
@@ -0,0 +1,391 @@
|
||||
#!/usr/bin/env bats
|
||||
# -----------------------------------------------------------------------------
|
||||
# Cozystack end‑to‑end provisioning test (Bats)
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
@test "Required installer assets exist" {
|
||||
if [ ! -f _out/assets/cozystack-installer.yaml ]; then
|
||||
echo "Missing: _out/assets/cozystack-installer.yaml" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
|
||||
echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "IPv4 forwarding is enabled" {
|
||||
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
|
||||
echo "IPv4 forwarding is disabled!" >&2
|
||||
echo >&2
|
||||
echo "Enable it with:" >&2
|
||||
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Clean previous VMs" {
|
||||
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
|
||||
rm -rf srv1 srv2 srv3
|
||||
}
|
||||
|
||||
@test "Prepare networking and masquerading" {
|
||||
ip link del cozy-br0 2>/dev/null || true
|
||||
ip link add cozy-br0 type bridge
|
||||
ip link set cozy-br0 up
|
||||
ip address add 192.168.123.1/24 dev cozy-br0
|
||||
|
||||
# Masquerading rule – idempotent (delete first, then add)
|
||||
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
|
||||
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
|
||||
}
|
||||
|
||||
@test "Prepare cloud‑init drive for VMs" {
|
||||
mkdir -p srv1 srv2 srv3
|
||||
|
||||
# Generate cloud‑init ISOs
|
||||
for i in 1 2 3; do
|
||||
echo "hostname: srv${i}" > "srv${i}/meta-data"
|
||||
|
||||
cat > "srv${i}/user-data" <<'EOF'
|
||||
#cloud-config
|
||||
EOF
|
||||
|
||||
cat > "srv${i}/network-config" <<EOF
|
||||
version: 2
|
||||
ethernets:
|
||||
eth0:
|
||||
dhcp4: false
|
||||
addresses:
|
||||
- "192.168.123.1${i}/26"
|
||||
gateway4: "192.168.123.1"
|
||||
nameservers:
|
||||
search: [cluster.local]
|
||||
addresses: [8.8.8.8]
|
||||
EOF
|
||||
|
||||
( cd "srv${i}" && genisoimage \
|
||||
-output seed.img \
|
||||
-volid cidata -rational-rock -joliet \
|
||||
user-data meta-data network-config )
|
||||
done
|
||||
}
|
||||
|
||||
@test "Use Talos NoCloud image from assets" {
|
||||
if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
|
||||
echo "Missing _out/assets/nocloud-amd64.raw.xz" 2>&1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -f nocloud-amd64.raw
|
||||
cp _out/assets/nocloud-amd64.raw.xz .
|
||||
xz --decompress nocloud-amd64.raw.xz
|
||||
}
|
||||
|
||||
@test "Prepare VM disks" {
|
||||
for i in 1 2 3; do
|
||||
cp nocloud-amd64.raw srv${i}/system.img
|
||||
qemu-img resize srv${i}/system.img 50G
|
||||
qemu-img create srv${i}/data.img 100G
|
||||
done
|
||||
}
|
||||
|
||||
@test "Create tap devices" {
|
||||
for i in 1 2 3; do
|
||||
ip link del cozy-srv${i} 2>/dev/null || true
|
||||
ip tuntap add dev cozy-srv${i} mode tap
|
||||
ip link set cozy-srv${i} up
|
||||
ip link set cozy-srv${i} master cozy-br0
|
||||
done
|
||||
}
|
||||
|
||||
@test "Boot QEMU VMs" {
|
||||
for i in 1 2 3; do
|
||||
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
|
||||
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
|
||||
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
|
||||
-drive file=srv${i}/system.img,if=virtio,format=raw \
|
||||
-drive file=srv${i}/seed.img,if=virtio,format=raw \
|
||||
-drive file=srv${i}/data.img,if=virtio,format=raw \
|
||||
-display none -daemonize -pidfile srv${i}/qemu.pid
|
||||
done
|
||||
|
||||
# Give qemu a few seconds to start up networking
|
||||
sleep 5
|
||||
}
|
||||
|
||||
@test "Wait until Talos API port 50000 is reachable on all machines" {
|
||||
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Generate Talos cluster configuration" {
|
||||
# Cluster‑wide patches
|
||||
cat > patch.yaml <<'EOF'
|
||||
machine:
|
||||
kubelet:
|
||||
nodeIP:
|
||||
validSubnets:
|
||||
- 192.168.123.0/24
|
||||
extraConfig:
|
||||
maxPods: 512
|
||||
kernel:
|
||||
modules:
|
||||
- name: openvswitch
|
||||
- name: drbd
|
||||
parameters:
|
||||
- usermode_helper=disabled
|
||||
- name: zfs
|
||||
- name: spl
|
||||
registries:
|
||||
mirrors:
|
||||
docker.io:
|
||||
endpoints:
|
||||
- https://mirror.gcr.io
|
||||
files:
|
||||
- content: |
|
||||
[plugins]
|
||||
[plugins."io.containerd.cri.v1.runtime"]
|
||||
device_ownership_from_security_context = true
|
||||
path: /etc/cri/conf.d/20-customization.part
|
||||
op: create
|
||||
|
||||
cluster:
|
||||
apiServer:
|
||||
extraArgs:
|
||||
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
||||
oidc-client-id: "kubernetes"
|
||||
oidc-username-claim: "preferred_username"
|
||||
oidc-groups-claim: "groups"
|
||||
network:
|
||||
cni:
|
||||
name: none
|
||||
dnsDomain: cozy.local
|
||||
podSubnets:
|
||||
- 10.244.0.0/16
|
||||
serviceSubnets:
|
||||
- 10.96.0.0/16
|
||||
EOF
|
||||
|
||||
# Control‑plane‑only patches
|
||||
cat > patch-controlplane.yaml <<'EOF'
|
||||
machine:
|
||||
nodeLabels:
|
||||
node.kubernetes.io/exclude-from-external-load-balancers:
|
||||
$patch: delete
|
||||
network:
|
||||
interfaces:
|
||||
- interface: eth0
|
||||
vip:
|
||||
ip: 192.168.123.10
|
||||
cluster:
|
||||
allowSchedulingOnControlPlanes: true
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
scheduler:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
apiServer:
|
||||
certSANs:
|
||||
- 127.0.0.1
|
||||
proxy:
|
||||
disabled: true
|
||||
discovery:
|
||||
enabled: false
|
||||
etcd:
|
||||
advertisedSubnets:
|
||||
- 192.168.123.0/24
|
||||
EOF
|
||||
|
||||
# Generate secrets once
|
||||
if [ ! -f secrets.yaml ]; then
|
||||
talosctl gen secrets
|
||||
fi
|
||||
|
||||
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
||||
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
|
||||
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
||||
}
|
||||
|
||||
@test "Apply Talos configuration to the node" {
|
||||
# Apply the configuration to all three nodes
|
||||
for node in 11 12 13; do
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
|
||||
done
|
||||
|
||||
# Wait for Talos services to come up again
|
||||
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Bootstrap Talos cluster" {
|
||||
# Bootstrap etcd on the first node
|
||||
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
||||
|
||||
# Wait until etcd is healthy
|
||||
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
|
||||
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
|
||||
|
||||
# Retrieve kubeconfig
|
||||
rm -f kubeconfig
|
||||
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
||||
|
||||
# Wait until all three nodes register in Kubernetes
|
||||
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
|
||||
}
|
||||
|
||||
@test "Install Cozystack" {
|
||||
# Create namespace & configmap required by installer
|
||||
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
|
||||
kubectl create configmap cozystack -n cozy-system \
|
||||
--from-literal=bundle-name=paas-full \
|
||||
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
|
||||
--from-literal=ipv4-pod-gateway=10.244.0.1 \
|
||||
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
|
||||
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
|
||||
--from-literal=root-host=example.org \
|
||||
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
|
||||
--dry-run=client -o yaml | kubectl apply -f -
|
||||
|
||||
# Apply installer manifests from file
|
||||
kubectl apply -f _out/assets/cozystack-installer.yaml
|
||||
|
||||
# Wait for the installer deployment to become available
|
||||
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
|
||||
|
||||
# Wait until HelmReleases appear & reconcile them
|
||||
timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
|
||||
sleep 5
|
||||
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
|
||||
|
||||
# Fail the test if any HelmRelease is not Ready
|
||||
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
|
||||
kubectl get hr -A
|
||||
fail "Some HelmReleases failed to reconcile"
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Wait for Cluster‑API provider deployments" {
|
||||
# Wait for Cluster‑API provider deployments
|
||||
timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
|
||||
}
|
||||
|
||||
@test "Wait for LINSTOR and configure storage" {
|
||||
# Linstor controller and nodes
|
||||
kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
|
||||
timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
|
||||
|
||||
for node in srv1 srv2 srv3; do
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
|
||||
done
|
||||
|
||||
# Storage classes
|
||||
kubectl apply -f - <<'EOF'
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/layerList: "storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: replicated
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/autoPlace: "3"
|
||||
linstor.csi.linbit.com/layerList: "drbd storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
|
||||
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
EOF
|
||||
}
|
||||
|
||||
@test "Wait for MetalLB and configure address pool" {
|
||||
# MetalLB address pool
|
||||
kubectl apply -f - <<'EOF'
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: L2Advertisement
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
ipAddressPools: [cozystack]
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
addresses: [192.168.123.200-192.168.123.250]
|
||||
autoAssign: true
|
||||
avoidBuggyIPs: false
|
||||
EOF
|
||||
}
|
||||
|
||||
@test "Check Cozystack API service" {
|
||||
kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
|
||||
}
|
||||
|
||||
@test "Configure Tenant and wait for applications" {
|
||||
# Patch root tenant and wait for its releases
|
||||
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'
|
||||
|
||||
timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready
|
||||
|
||||
if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
|
||||
flux reconcile hr monitoring -n tenant-root --force
|
||||
kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
|
||||
fi
|
||||
|
||||
# Expose Cozystack services through ingress
|
||||
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
|
||||
|
||||
# NGINX ingress controller
|
||||
timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available
|
||||
|
||||
# etcd statefulset
|
||||
kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m
|
||||
|
||||
# VictoriaMetrics components
|
||||
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||
kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||
kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m
|
||||
|
||||
# Grafana
|
||||
kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
|
||||
kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m
|
||||
|
||||
# Verify Grafana via ingress
|
||||
ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
|
||||
echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@test "Keycloak OIDC stack is healthy" {
|
||||
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'
|
||||
|
||||
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
|
||||
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
|
||||
}
|
||||
hack/e2e.sh (deleted): 370 lines
@@ -1,370 +0,0 @@
|
||||
#!/bin/bash
|
||||
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
|
||||
echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
|
||||
echo 'please set it with following command:' >&2
|
||||
echo >&2
|
||||
echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
|
||||
echo >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
|
||||
echo "IPv4 forwarding is not enabled!" >&2
|
||||
echo 'please enable forwarding with the following command:' >&2
|
||||
echo >&2
|
||||
echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
|
||||
echo >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set -x
|
||||
set -e
|
||||
|
||||
kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true
|
||||
|
||||
ip link del cozy-br0 || true
|
||||
ip link add cozy-br0 type bridge
|
||||
ip link set cozy-br0 up
|
||||
ip addr add 192.168.123.1/24 dev cozy-br0
|
||||
|
||||
# Enable masquerading
|
||||
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
|
||||
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
|
||||
|
||||
rm -rf srv1 srv2 srv3
|
||||
mkdir -p srv1 srv2 srv3
|
||||
|
||||
# Prepare cloud-init
|
||||
for i in 1 2 3; do
|
||||
echo "hostname: srv$i" > "srv$i/meta-data"
|
||||
echo '#cloud-config' > "srv$i/user-data"
|
||||
cat > "srv$i/network-config" <<EOT
|
||||
version: 2
|
||||
ethernets:
|
||||
eth0:
|
||||
dhcp4: false
|
||||
addresses:
|
||||
- "192.168.123.1$i/26"
|
||||
gateway4: "192.168.123.1"
|
||||
nameservers:
|
||||
search: [cluster.local]
|
||||
addresses: [8.8.8.8]
|
||||
EOT
|
||||
|
||||
( cd srv$i && genisoimage \
|
||||
-output seed.img \
|
||||
-volid cidata -rational-rock -joliet \
|
||||
user-data meta-data network-config
|
||||
)
|
||||
done
|
||||
|
||||
# Prepare system drive
|
||||
if [ ! -f nocloud-amd64.raw ]; then
|
||||
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
|
||||
-O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
|
||||
rm -f nocloud-amd64.raw
|
||||
xz --decompress nocloud-amd64.raw.xz
|
||||
fi
|
||||
for i in 1 2 3; do
|
||||
cp nocloud-amd64.raw srv$i/system.img
|
||||
qemu-img resize srv$i/system.img 20G
|
||||
done
|
||||
|
||||
# Prepare data drives
|
||||
for i in 1 2 3; do
|
||||
qemu-img create srv$i/data.img 100G
|
||||
done
|
||||
|
||||
# Prepare networking
|
||||
for i in 1 2 3; do
|
||||
ip link del cozy-srv$i || true
|
||||
ip tuntap add dev cozy-srv$i mode tap
|
||||
ip link set cozy-srv$i up
|
||||
ip link set cozy-srv$i master cozy-br0
|
||||
done
|
||||
|
||||
# Start VMs
|
||||
for i in 1 2 3; do
|
||||
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
|
||||
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
|
||||
-netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
|
||||
-drive file=srv$i/system.img,if=virtio,format=raw \
|
||||
-drive file=srv$i/seed.img,if=virtio,format=raw \
|
||||
-drive file=srv$i/data.img,if=virtio,format=raw \
|
||||
-display none -daemonize -pidfile srv$i/qemu.pid
|
||||
done
|
||||
|
||||
sleep 5
|
||||
|
||||
# Wait for VM to start up
|
||||
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
|
||||
|
||||
cat > patch.yaml <<\EOT
|
||||
machine:
|
||||
kubelet:
|
||||
nodeIP:
|
||||
validSubnets:
|
||||
- 192.168.123.0/24
|
||||
extraConfig:
|
||||
maxPods: 512
|
||||
kernel:
|
||||
modules:
|
||||
- name: openvswitch
|
||||
- name: drbd
|
||||
parameters:
|
||||
- usermode_helper=disabled
|
||||
- name: zfs
|
||||
- name: spl
|
||||
registries:
|
||||
mirrors:
|
||||
docker.io:
|
||||
endpoints:
|
||||
- https://mirror.gcr.io
|
||||
files:
|
||||
- content: |
|
||||
[plugins]
|
||||
[plugins."io.containerd.cri.v1.runtime"]
|
||||
device_ownership_from_security_context = true
|
||||
path: /etc/cri/conf.d/20-customization.part
|
||||
op: create
|
||||
|
||||
cluster:
|
||||
apiServer:
|
||||
extraArgs:
|
||||
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
||||
oidc-client-id: "kubernetes"
|
||||
oidc-username-claim: "preferred_username"
|
||||
oidc-groups-claim: "groups"
|
||||
network:
|
||||
cni:
|
||||
name: none
|
||||
dnsDomain: cozy.local
|
||||
podSubnets:
|
||||
- 10.244.0.0/16
|
||||
serviceSubnets:
|
||||
- 10.96.0.0/16
|
||||
EOT
|
||||
|
||||
cat > patch-controlplane.yaml <<\EOT
|
||||
machine:
|
||||
nodeLabels:
|
||||
node.kubernetes.io/exclude-from-external-load-balancers:
|
||||
$patch: delete
|
||||
network:
|
||||
interfaces:
|
||||
- interface: eth0
|
||||
vip:
|
||||
ip: 192.168.123.10
|
||||
cluster:
|
||||
allowSchedulingOnControlPlanes: true
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
scheduler:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
apiServer:
|
||||
certSANs:
|
||||
- 127.0.0.1
|
||||
proxy:
|
||||
disabled: true
|
||||
discovery:
|
||||
enabled: false
|
||||
etcd:
|
||||
advertisedSubnets:
|
||||
- 192.168.123.0/24
|
||||
EOT
|
||||
|
||||
# Gen configuration
|
||||
if [ ! -f secrets.yaml ]; then
|
||||
talosctl gen secrets
|
||||
fi
|
||||
|
||||
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
||||
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
||||
export TALOSCONFIG=$PWD/talosconfig
|
||||
|
||||
# Apply configuration
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
|
||||
|
||||
# Wait for VM to be configured
|
||||
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
|
||||
|
||||
# Bootstrap
|
||||
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
||||
|
||||
# Wait for etcd
|
||||
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
|
||||
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
|
||||
|
||||
rm -f kubeconfig
|
||||
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
||||
export KUBECONFIG=$PWD/kubeconfig
|
||||
|
||||
# Wait for kubernetes nodes appear
|
||||
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
|
||||
kubectl create ns cozy-system -o yaml | kubectl apply -f -
|
||||
kubectl create -f - <<\EOT
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-system
|
||||
data:
|
||||
bundle-name: "paas-full"
|
||||
ipv4-pod-cidr: "10.244.0.0/16"
|
||||
ipv4-pod-gateway: "10.244.0.1"
|
||||
ipv4-svc-cidr: "10.96.0.0/16"
|
||||
ipv4-join-cidr: "100.64.0.0/16"
|
||||
root-host: example.org
|
||||
api-server-endpoint: https://192.168.123.10:6443
|
||||
EOT
|
||||
|
||||
#
|
||||
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
|
||||
|
||||
# wait for cozystack pod to start
|
||||
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
|
||||
|
||||
# wait for helmreleases appear
|
||||
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
|
||||
|
||||
sleep 5
|
||||
|
||||
# Wait for all HelmReleases to be installed
|
||||
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
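
The awk pipeline above fans out one `kubectl wait` per HelmRelease and runs them in parallel. A rough, sequential equivalent (a sketch only, assuming the parallelism is not essential here) would be:

```bash
# Wait for every HelmRelease in all namespaces to become Ready (sequential variant)
kubectl wait hr --all -A --for=condition=ready --timeout=15m
```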
|
||||
|
||||
failed_hrs=$(kubectl get hr -A | grep -v True)
|
||||
if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
|
||||
printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Wait for Cluster-API providers
|
||||
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
|
||||
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
|
||||
|
||||
# Wait for linstor controller
|
||||
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
|
||||
|
||||
# Wait for all linstor nodes become Online
|
||||
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
|
||||
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
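
As an optional sanity check (not part of the original script, just a sketch), the created pools can be listed with LINSTOR:

```bash
# Each satellite (srv1..srv3) should report a ZFS-backed "data" storage pool
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor storage-pool list
```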
|
||||
|
||||
kubectl create -f- <<EOT
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/layerList: "storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: replicated
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/autoPlace: "3"
|
||||
linstor.csi.linbit.com/layerList: "drbd storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
|
||||
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
EOT
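
A quick verification sketch (assumed, not in the original script) to confirm both StorageClasses exist and that `local` is the default:

```bash
kubectl get storageclass
# Expected: "local" marked as (default) and "replicated", both provisioned by linstor.csi.linbit.com
```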
|
||||
kubectl create -f- <<EOT
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: L2Advertisement
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
ipAddressPools:
|
||||
- cozystack
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
addresses:
|
||||
- 192.168.123.200-192.168.123.250
|
||||
autoAssign: true
|
||||
avoidBuggyIPs: false
|
||||
EOT
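
Similarly, a hedged check that the MetalLB objects created above were admitted:

```bash
kubectl get ipaddresspools.metallb.io,l2advertisements.metallb.io -n cozy-metallb
```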
|
||||
|
||||
# Wait for cozystack-api
|
||||
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
|
||||
|
||||
kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
|
||||
"host": "example.org",
|
||||
"ingress": true,
|
||||
"monitoring": true,
|
||||
"etcd": true,
|
||||
"isolated": true
|
||||
}}'
|
||||
|
||||
# Wait for HelmRelease be created
|
||||
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
|
||||
|
||||
# Wait for HelmReleases be installed
|
||||
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
|
||||
|
||||
if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
|
||||
flux reconcile hr monitoring -n tenant-root --force
|
||||
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
|
||||
fi
|
||||
|
||||
kubectl patch -n cozy-system cm cozystack --type=merge -p '{"data":{
|
||||
"expose-services": "api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"
|
||||
}}'
|
||||
|
||||
# Wait for nginx-ingress-controller
|
||||
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
|
||||
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller
|
||||
|
||||
# Wait for etcd
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd
|
||||
|
||||
# Wait for Victoria metrics
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
|
||||
|
||||
# Wait for grafana
|
||||
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
|
||||
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment
|
||||
|
||||
# Get IP of nginx-ingress
|
||||
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
|
||||
|
||||
# Check Grafana
|
||||
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
|
||||
|
||||
|
||||
# Test OIDC
|
||||
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
|
||||
"oidc-enabled": "true"
|
||||
}}'
|
||||
|
||||
timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
|
||||
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
|
||||
139
internal/controller/system_helm_reconciler.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
helmv2 "github.com/fluxcd/helm-controller/api/v2"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
)
|
||||
|
||||
type CozystackConfigReconciler struct {
|
||||
client.Client
|
||||
Scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
var configMapNames = []string{"cozystack", "cozystack-branding", "cozystack-scheduling"}
|
||||
|
||||
const configMapNamespace = "cozy-system"
|
||||
const digestAnnotation = "cozystack.io/cozy-config-digest"
|
||||
const forceReconcileKey = "reconcile.fluxcd.io/forceAt"
|
||||
const requestedAt = "reconcile.fluxcd.io/requestedAt"
|
||||
|
||||
func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
|
||||
log := log.FromContext(ctx)
|
||||
|
||||
digest, err := r.computeDigest(ctx)
|
||||
if err != nil {
|
||||
log.Error(err, "failed to compute config digest")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
var helmList helmv2.HelmReleaseList
|
||||
if err := r.List(ctx, &helmList); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to list HelmReleases: %w", err)
|
||||
}
|
||||
|
||||
now := time.Now().Format(time.RFC3339Nano)
|
||||
updated := 0
|
||||
|
||||
for _, hr := range helmList.Items {
|
||||
isSystemApp := hr.Labels["cozystack.io/system-app"] == "true"
|
||||
isTenantRoot := hr.Namespace == "tenant-root" && hr.Name == "tenant-root"
|
||||
if !isSystemApp && !isTenantRoot {
|
||||
continue
|
||||
}
|
||||
|
||||
if hr.Annotations == nil {
|
||||
hr.Annotations = map[string]string{}
|
||||
}
|
||||
|
||||
if hr.Annotations[digestAnnotation] == digest {
|
||||
continue
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(hr.DeepCopy())
|
||||
hr.Annotations[digestAnnotation] = digest
|
||||
hr.Annotations[forceReconcileKey] = now
|
||||
hr.Annotations[requestedAt] = now
|
||||
|
||||
if err := r.Patch(ctx, &hr, patch); err != nil {
|
||||
log.Error(err, "failed to patch HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
|
||||
continue
|
||||
}
|
||||
updated++
|
||||
log.Info("patched HelmRelease with new config digest", "name", hr.Name, "namespace", hr.Namespace)
|
||||
}
|
||||
|
||||
log.Info("finished reconciliation", "updatedHelmReleases", updated)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *CozystackConfigReconciler) computeDigest(ctx context.Context) (string, error) {
|
||||
hash := sha256.New()
|
||||
|
||||
for _, name := range configMapNames {
|
||||
var cm corev1.ConfigMap
|
||||
err := r.Get(ctx, client.ObjectKey{Namespace: configMapNamespace, Name: name}, &cm)
|
||||
if err != nil {
|
||||
if kerrors.IsNotFound(err) {
|
||||
continue // ignore missing
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Sort keys for consistent hashing
|
||||
var keys []string
|
||||
for k := range cm.Data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
v := cm.Data[k]
|
||||
fmt.Fprintf(hash, "%s:%s=%s\n", name, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return hex.EncodeToString(hash.Sum(nil)), nil
|
||||
}
|
||||
|
||||
func (r *CozystackConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
WithEventFilter(predicate.Funcs{
|
||||
UpdateFunc: func(e event.UpdateEvent) bool {
|
||||
cm, ok := e.ObjectNew.(*corev1.ConfigMap)
|
||||
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
|
||||
},
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
cm, ok := e.Object.(*corev1.ConfigMap)
|
||||
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
|
||||
},
|
||||
DeleteFunc: func(e event.DeleteEvent) bool {
|
||||
cm, ok := e.Object.(*corev1.ConfigMap)
|
||||
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
|
||||
},
|
||||
}).
|
||||
For(&corev1.ConfigMap{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func contains(slice []string, val string) bool {
|
||||
for _, s := range slice {
|
||||
if s == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
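
To observe the effect of this controller, one can inspect the digest annotation it writes on system HelmReleases. The annotation key comes from the constants above; the jsonpath expression itself is only an illustrative sketch:

```bash
# Print namespace, name, and the cozystack.io/cozy-config-digest annotation of every HelmRelease
kubectl get hr -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.metadata.annotations.cozystack\.io/cozy-config-digest}{"\n"}{end}'
```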
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.9.0
|
||||
version: 0.9.2
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
CLICKHOUSE_BACKUP_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
|
||||
CLICKHOUSE_BACKUP_TAG = $(shell awk '$$0 ~ /^version:/ {print $$2}' Chart.yaml)
|
||||
|
||||
include ../../../scripts/common-envs.mk
|
||||
include ../../../scripts/package.mk
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/clickhouse-backup:0.9.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
|
||||
ghcr.io/cozystack/cozystack/clickhouse-backup:0.9.2@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
||||
{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
|
||||
{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
|
||||
{{- $passwords := dict }}
|
||||
{{- $users := .Values.users }}
|
||||
@@ -32,7 +34,7 @@ kind: "ClickHouseInstallation"
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
spec:
|
||||
namespaceDomainPattern: "%s.svc.cozy.local"
|
||||
namespaceDomainPattern: "%s.svc.{{ $clusterDomain }}"
|
||||
defaults:
|
||||
templates:
|
||||
dataVolumeClaimTemplate: data-volume-template
|
||||
@@ -92,6 +94,9 @@ spec:
|
||||
templates:
|
||||
volumeClaimTemplates:
|
||||
- name: data-volume-template
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
@@ -99,6 +104,9 @@ spec:
|
||||
requests:
|
||||
storage: {{ .Values.size }}
|
||||
- name: log-volume-template
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
@@ -107,6 +115,9 @@ spec:
|
||||
storage: {{ .Values.logStorageSize }}
|
||||
podTemplates:
|
||||
- name: clickhouse-per-host
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
@@ -122,9 +133,9 @@ spec:
|
||||
- name: clickhouse
|
||||
image: clickhouse/clickhouse-server:24.9.2.42
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" .Values.resources | nindent 16 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 16 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "cozy-lib.resources.preset" .Values.resourcesPreset | nindent 16 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 16 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: data-volume-template
|
||||
@@ -133,6 +144,9 @@ spec:
|
||||
mountPath: /var/log/clickhouse-server
|
||||
serviceTemplates:
|
||||
- name: svc-template
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
generateName: chendpoint-{chi}
|
||||
spec:
|
||||
ports:
|
||||
|
||||
@@ -9,5 +9,5 @@ spec:
|
||||
kind: clickhouse
|
||||
type: clickhouse
|
||||
selector:
|
||||
clickhouse.altinity.com/chi: {{ $.Release.Name }}
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.6.0
|
||||
version: 0.6.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/ferretdb/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/postgres-backup:0.11.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
|
||||
ghcr.io/cozystack/cozystack/postgres-backup:0.12.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
|
||||
|
||||
@@ -2,6 +2,8 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ .Release.Name }}
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
|
||||
{{- if .Values.external }}
|
||||
|
||||
@@ -12,6 +12,7 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
containers:
|
||||
- name: ferretdb
|
||||
|
||||
@@ -11,14 +11,17 @@ spec:
|
||||
{{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
|
||||
{{- if $rawConstraints }}
|
||||
{{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
cnpg.io/cluster: {{ .Release.Name }}-postgres
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
|
||||
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
|
||||
{{- end }}
|
||||
monitoring:
|
||||
enablePodMonitor: true
|
||||
@@ -32,6 +35,7 @@ spec:
|
||||
inheritedMetadata:
|
||||
labels:
|
||||
policy.cozystack.io/allow-to-apiserver: "true"
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
|
||||
{{- if .Values.users }}
|
||||
managed:
|
||||
|
||||
@@ -9,5 +9,5 @@ spec:
|
||||
kind: ferretdb
|
||||
type: ferretdb
|
||||
selector:
|
||||
app: {{ $.Release.Name }}
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.5.0
|
||||
version: 0.5.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/http-cache/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/nginx-cache:0.5.0@sha256:158c35dd6a512bd14e86a423be5c8c7ca91ac71999c73cce2714e4db60a2db43
|
||||
ghcr.io/cozystack/cozystack/nginx-cache:0.5.1@sha256:50ac1581e3100bd6c477a71161cb455a341ffaf9e5e2f6086802e4e25271e8af
|
||||
|
||||
@@ -34,9 +34,9 @@ spec:
|
||||
- image: haproxy:latest
|
||||
name: haproxy
|
||||
{{- if .Values.haproxy.resources }}
|
||||
resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.haproxy.resources $) | nindent 10 }}
|
||||
{{- else if ne .Values.haproxy.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.haproxy.resourcesPreset $) | nindent 10 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
|
||||
@@ -53,9 +53,9 @@ spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
{{- if $.Values.nginx.resources }}
|
||||
resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list $.Values.nginx.resources $) | nindent 10 }}
|
||||
{{- else if ne $.Values.nginx.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list $.Values.nginx.resourcesPreset $) | nindent 10 }}
|
||||
{{- end }}
|
||||
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
|
||||
readinessProbe:
|
||||
|
||||
39
packages/apps/http-cache/templates/workloadmonitor.yaml
Normal file
@@ -0,0 +1,39 @@
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-haproxy
|
||||
spec:
|
||||
replicas: {{ .Values.haproxy.replicas }}
|
||||
minReplicas: 1
|
||||
kind: http-cache
|
||||
type: http-cache
|
||||
selector:
|
||||
app: {{ $.Release.Name }}-haproxy
|
||||
version: {{ $.Chart.Version }}
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-nginx
|
||||
spec:
|
||||
replicas: {{ .Values.nginx.replicas }}
|
||||
minReplicas: 1
|
||||
kind: http-cache
|
||||
type: http-cache
|
||||
selector:
|
||||
app: {{ $.Release.Name }}-nginx-cache
|
||||
version: {{ $.Chart.Version }}
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
minReplicas: 1
|
||||
kind: http-cache
|
||||
type: http-cache
|
||||
selector:
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.6.0
|
||||
version: 0.6.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -14,9 +14,9 @@
|
||||
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
|
||||
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
|
||||
| `kafka.resources` | Resources | `{}` |
|
||||
| `kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
|
||||
| `kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
|
||||
| `zookeeper.resources` | Resources | `{}` |
|
||||
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
|
||||
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
|
||||
|
||||
### Configuration parameters
|
||||
|
||||
|
||||
1
packages/apps/kafka/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -9,9 +9,9 @@ spec:
|
||||
kafka:
|
||||
replicas: {{ .Values.kafka.replicas }}
|
||||
{{- if .Values.kafka.resources }}
|
||||
resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.kafka.resources $) | nindent 6 }}
|
||||
{{- else if ne .Values.kafka.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.kafka.resourcesPreset $) | nindent 6 }}
|
||||
{{- end }}
|
||||
listeners:
|
||||
- name: plain
|
||||
@@ -71,9 +71,9 @@ spec:
|
||||
zookeeper:
|
||||
replicas: {{ .Values.zookeeper.replicas }}
|
||||
{{- if .Values.zookeeper.resources }}
|
||||
resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.zookeeper.resources $) | nindent 6 }}
|
||||
{{- else if ne .Values.zookeeper.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.zookeeper.resourcesPreset $) | nindent 6 }}
|
||||
{{- end }}
|
||||
storage:
|
||||
type: persistent-claim
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"default": "nano"
|
||||
"default": "small"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -63,7 +63,7 @@
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"default": "nano"
|
||||
"default": "micro"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -25,7 +25,7 @@ kafka:
|
||||
# memory: 512Mi
|
||||
|
||||
## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
resourcesPreset: "nano"
|
||||
resourcesPreset: "small"
|
||||
|
||||
zookeeper:
|
||||
size: 5Gi
|
||||
@@ -42,7 +42,7 @@ zookeeper:
|
||||
# memory: 512Mi
|
||||
|
||||
## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
resourcesPreset: "nano"
|
||||
resourcesPreset: "micro"
|
||||
|
||||
## @section Configuration parameters
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.20.1
|
||||
version: 0.23.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -1,77 +1,200 @@
# Managed Kubernetes Service

## Overview
## Managed Kubernetes in Cozystack

The Managed Kubernetes Service offers a streamlined solution for efficiently managing server workloads. Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration. This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management.
Whenever you want to deploy a custom containerized application in Cozystack, it's best to deploy it to a managed Kubernetes cluster.

The Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method. Additionally, it ensures seamless scaling across a multitude of servers, addressing the challenges posed by complex and outdated APIs found in traditional virtualization platforms. This managed service eliminates the need for developing custom solutions or modifying source code, saving valuable time and effort.
Cozystack deploys and manages Kubernetes-as-a-service as standalone applications within each tenant’s isolated environment.
In Cozystack, such clusters are named tenant Kubernetes clusters, while the base Cozystack cluster is called a management or root cluster.
Tenant clusters are fully separated from the management cluster and are intended for deploying tenant-specific or customer-developed applications.

## Deployment Details
Within a tenant cluster, users can take advantage of LoadBalancer services and easily provision physical volumes as needed.
The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.

The managed Kubernetes service deploys a standard Kubernetes cluster utilizing the Cluster API, Kamaji as control-plane provider and the KubeVirt infrastructure provider. This ensures a consistent and reliable setup for workloads.
## Why Use a Managed Kubernetes Cluster?

Within this cluster, users can take advantage of LoadBalancer services and easily provision physical volumes as needed. The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.
Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration.
This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management.

- Docs: https://github.com/clastix/kamaji
- Docs: https://cluster-api.sigs.k8s.io/
- GitHub: https://github.com/clastix/kamaji
- GitHub: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt
- GitHub: https://github.com/kubevirt/csi-driver
Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method.
Additionally, it ensures seamless scaling across a multitude of servers,
addressing the challenges posed by complex and outdated APIs found in traditional virtualization platforms.
This managed service eliminates the need for developing custom solutions or modifying source code, saving valuable time and effort.

The Managed Kubernetes Service in Cozystack offers a streamlined solution for efficiently managing server workloads.

## How-Tos
## Starting Work

How to access the deployed cluster:
Once the tenant Kubernetes cluster is ready, you can get a kubeconfig file to work with it.
It can be done via UI or a `kubectl` request:

```
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```
- Open the Cozystack dashboard, switch to your tenant, find and open the application page. Copy one of the config files from the **Secrets** section.
- Run the following command (using the management cluster kubeconfig):

```bash
kubectl get secret -n tenant-<name> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "admin.conf" | base64decode) }}' > admin.conf
```

There are several kubeconfig options available:

- `admin.conf` — The standard kubeconfig for accessing your new cluster.
  You can create additional Kubernetes users using this configuration.
- `admin.svc` — Same token as `admin.conf`, but with the API server address set to the internal service name.
  Use it for applications running inside the cluster that need API access.
- `super-admin.conf` — Similar to `admin.conf`, but with extended administrative permissions.
  Intended for troubleshooting and cluster maintenance tasks.
- `super-admin.svc` — Same as `super-admin.conf`, but pointing to the internal API server address.
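
For illustration, a minimal sketch of using the downloaded kubeconfig (the `admin.conf` file name matches the command above; the nginx image is just an example workload):

```bash
export KUBECONFIG=$PWD/admin.conf
kubectl get nodes                              # worker VMs of the tenant cluster
kubectl create deployment web --image=nginx    # deploy an example containerized app
```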

## Implementation Details

A tenant Kubernetes cluster in Cozystack is essentially Kubernetes-in-Kubernetes.
Deploying it involves the following components:

- **Kamaji Control Plane**: [Kamaji](https://kamaji.clastix.io/) is an open-source project that facilitates the deployment
  of Kubernetes control planes as pods within a root cluster.
  Each control plane pod includes essential components like `kube-apiserver`, `controller-manager`, and `scheduler`,
  allowing for efficient multi-tenancy and resource utilization.

- **Etcd Cluster**: A dedicated etcd cluster is deployed using Ænix's [etcd-operator](https://github.com/aenix-io/etcd-operator).
  It provides reliable and scalable key-value storage for the Kubernetes control plane.

- **Worker Nodes**: Virtual Machines are provisioned to serve as worker nodes using KubeVirt.
  These nodes are configured to join the tenant Kubernetes cluster, enabling the deployment and management of workloads.

- **Cluster API**: Cozystack is using the [Kubernetes Cluster API](https://cluster-api.sigs.k8s.io/) to provision the components of a cluster.

This architecture ensures isolated, scalable, and efficient tenant Kubernetes environments.

See the reference for components utilized in this service:

- [Kamaji Control Plane](https://kamaji.clastix.io)
- [Kamaji — Cluster API](https://kamaji.clastix.io/cluster-api/)
- [github.com/clastix/kamaji](https://github.com/clastix/kamaji)
- [KubeVirt](https://kubevirt.io/)
- [github.com/kubevirt/kubevirt](https://github.com/kubevirt/kubevirt)
- [github.com/aenix-io/etcd-operator](https://github.com/aenix-io/etcd-operator)
- [Kubernetes Cluster API](https://cluster-api.sigs.k8s.io/)
- [github.com/kubernetes-sigs/cluster-api-provider-kubevirt](https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt)
- [github.com/kubevirt/csi-driver](https://github.com/kubevirt/csi-driver)

## Parameters
|
||||
|
||||
### Common parameters
|
||||
### Common Parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
|
||||
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
|
||||
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
|
||||
| `storageClass` | StorageClass used to store user data | `replicated` |
|
||||
| `nodeGroups` | nodeGroups configuration | `{}` |
|
||||
| Name | Description | Value |
|
||||
| ----------------------------------- | ----------------------------------------------------------------------------------------------------------------- | ------------ |
|
||||
| `host` | Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty. | `""` |
|
||||
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components. | `2` |
|
||||
| `storageClass` | StorageClass used to store user data. | `replicated` |
|
||||
| `useCustomSecretForPatchContainerd` | if true, for patch containerd will be used secret: {{ .Release.Name }}-patch-containerd | `false` |
|
||||
| `nodeGroups` | nodeGroups configuration | `{}` |
|
||||
|
||||
### Cluster Addons
|
||||
|
||||
| Name | Description | Value |
|
||||
| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| `addons.certManager.enabled` | Enables the cert-manager | `false` |
|
||||
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.gatewayAPI.enabled` | Enables the Gateway API | `false` |
|
||||
| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
|
||||
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
|
||||
| `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
|
||||
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
|
||||
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
|
||||
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
|
||||
| Name | Description | Value |
|
||||
| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| `addons.certManager.enabled` | Enable cert-manager, which automatically creates and manages SSL/TLS certificates. | `false` |
|
||||
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.gatewayAPI.enabled` | Enable the Gateway API | `false` |
|
||||
| `addons.ingressNginx.enabled` | Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role). | `false` |
|
||||
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.ingressNginx.hosts` | List of domain names that the parent cluster should route to this tenant cluster. | `[]` |
|
||||
| `addons.gpuOperator.enabled` | Enable the GPU-operator | `false` |
|
||||
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.fluxcd.enabled` | Enable FluxCD | `false` |
|
||||
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.monitoringAgents.enabled` | Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage. | `false` |
|
||||
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
|
||||
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
|
||||
|
||||
### Kubernetes control plane configuration
|
||||
### Kubernetes Control Plane Configuration
|
||||
|
||||
| Name | Description | Value |
|
||||
| -------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
|
||||
| `controlPlane.apiServer.resources` | Resources | `{}` |
|
||||
| `controlPlane.controllerManager.resources` | Resources | `{}` |
|
||||
| `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
|
||||
| `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
|
||||
| `controlPlane.scheduler.resources` | Resources | `{}` |
|
||||
| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
|
||||
| `controlPlane.konnectivity.server.resources` | Resources | `{}` |
|
||||
| Name | Description | Value |
|
||||
| -------------------------------------------------- | ---------------------------------------------------------------------------- | ------- |
|
||||
| `controlPlane.apiServer.resources` | Explicit CPU/memory resource requests and limits for the API server. | `{}` |
|
||||
| `controlPlane.apiServer.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `small` |
|
||||
| `controlPlane.controllerManager.resources` | Explicit CPU/memory resource requests and limits for the controller manager. | `{}` |
|
||||
| `controlPlane.controllerManager.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
|
||||
| `controlPlane.scheduler.resources` | Explicit CPU/memory resource requests and limits for the scheduler. | `{}` |
|
||||
| `controlPlane.scheduler.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
|
||||
| `controlPlane.konnectivity.server.resources` | Explicit CPU/memory resource requests and limits for the Konnectivity. | `{}` |
|
||||
| `controlPlane.konnectivity.server.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. | `micro` |
|
||||
|
||||
In production environments, it's recommended to set `resources` explicitly.
Example of `controlPlane.*.resources`:

## U Series
```yaml
resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi
```

Allowed values for `controlPlane.*.resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
This value is ignored if the corresponding `resources` value is set.

## Resources Reference
|
||||
|
||||
### instanceType Resources
|
||||
|
||||
The following instanceType resources are provided by Cozystack:
|
||||
|
||||
| Name | vCPUs | Memory |
|
||||
|---------------|-------|--------|
|
||||
| `cx1.2xlarge` | 8 | 16Gi |
|
||||
| `cx1.4xlarge` | 16 | 32Gi |
|
||||
| `cx1.8xlarge` | 32 | 64Gi |
|
||||
| `cx1.large` | 2 | 4Gi |
|
||||
| `cx1.medium` | 1 | 2Gi |
|
||||
| `cx1.xlarge` | 4 | 8Gi |
|
||||
| `gn1.2xlarge` | 8 | 32Gi |
|
||||
| `gn1.4xlarge` | 16 | 64Gi |
|
||||
| `gn1.8xlarge` | 32 | 128Gi |
|
||||
| `gn1.xlarge` | 4 | 16Gi |
|
||||
| `m1.2xlarge` | 8 | 64Gi |
|
||||
| `m1.4xlarge` | 16 | 128Gi |
|
||||
| `m1.8xlarge` | 32 | 256Gi |
|
||||
| `m1.large` | 2 | 16Gi |
|
||||
| `m1.xlarge` | 4 | 32Gi |
|
||||
| `n1.2xlarge` | 16 | 32Gi |
|
||||
| `n1.4xlarge` | 32 | 64Gi |
|
||||
| `n1.8xlarge` | 64 | 128Gi |
|
||||
| `n1.large` | 4 | 8Gi |
|
||||
| `n1.medium` | 4 | 4Gi |
|
||||
| `n1.xlarge` | 8 | 16Gi |
|
||||
| `o1.2xlarge` | 8 | 32Gi |
|
||||
| `o1.4xlarge` | 16 | 64Gi |
|
||||
| `o1.8xlarge` | 32 | 128Gi |
|
||||
| `o1.large` | 2 | 8Gi |
|
||||
| `o1.medium` | 1 | 4Gi |
|
||||
| `o1.micro` | 1 | 1Gi |
|
||||
| `o1.nano` | 1 | 512Mi |
|
||||
| `o1.small` | 1 | 2Gi |
|
||||
| `o1.xlarge` | 4 | 16Gi |
|
||||
| `rt1.2xlarge` | 8 | 32Gi |
|
||||
| `rt1.4xlarge` | 16 | 64Gi |
|
||||
| `rt1.8xlarge` | 32 | 128Gi |
|
||||
| `rt1.large` | 2 | 8Gi |
|
||||
| `rt1.medium` | 1 | 4Gi |
|
||||
| `rt1.micro` | 1 | 1Gi |
|
||||
| `rt1.small` | 1 | 2Gi |
|
||||
| `rt1.xlarge` | 4 | 16Gi |
|
||||
| `u1.2xlarge` | 8 | 32Gi |
|
||||
| `u1.2xmedium` | 2 | 4Gi |
|
||||
| `u1.4xlarge` | 16 | 64Gi |
|
||||
| `u1.8xlarge` | 32 | 128Gi |
|
||||
| `u1.large` | 2 | 8Gi |
|
||||
| `u1.medium` | 1 | 4Gi |
|
||||
| `u1.micro` | 1 | 1Gi |
|
||||
| `u1.nano` | 1 | 512Mi |
|
||||
| `u1.small` | 1 | 2Gi |
|
||||
| `u1.xlarge` | 4 | 16Gi |
|
||||
|
||||
### U Series: Universal
|
||||
|
||||
The U Series is quite neutral and provides resources for
|
||||
general purpose applications.
|
||||
@@ -82,7 +205,7 @@ attitude towards workloads.
|
||||
VMs of instance types will share physical CPU cores on a
|
||||
time-slice basis with other VMs.
|
||||
|
||||
### U Series Characteristics
|
||||
#### U Series Characteristics
|
||||
|
||||
Specific characteristics of this series are:
|
||||
- *Burstable CPU performance* - The workload has a baseline compute
|
||||
@@ -91,14 +214,14 @@ Specific characteristics of this series are:
|
||||
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
|
||||
noise per node.
|
||||
|
||||
## O Series
|
||||
### O Series: Overcommitted
|
||||
|
||||
The O Series is based on the U Series, with the only difference
|
||||
being that memory is overcommitted.
|
||||
|
||||
*O* is the abbreviation for "Overcommitted".
|
||||
|
||||
### UO Series Characteristics
|
||||
#### O Series Characteristics
|
||||
|
||||
Specific characteristics of this series are:
|
||||
- *Burstable CPU performance* - The workload has a baseline compute
|
||||
@@ -109,7 +232,7 @@ Specific characteristics of this series are:
|
||||
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4, for less
|
||||
noise per node.
|
||||
|
||||
## CX Series
|
||||
### CX Series: Compute Exclusive
|
||||
|
||||
The CX Series provides exclusive compute resources for compute
|
||||
intensive applications.
|
||||
@@ -123,7 +246,7 @@ the IO threading from cores dedicated to the workload.
|
||||
In addition, in this series, the NUMA topology of the used
|
||||
cores is provided to the VM.
|
||||
|
||||
### CX Series Characteristics
|
||||
#### CX Series Characteristics
|
||||
|
||||
Specific characteristics of this series are:
|
||||
- *Hugepages* - Hugepages are used in order to improve memory
|
||||
@@ -138,14 +261,14 @@ Specific characteristics of this series are:
|
||||
optimize guest sided cache utilization.
|
||||
- *vCPU-To-Memory Ratio (1:2)* - A vCPU-to-Memory ratio of 1:2.
|
||||
|
||||
## M Series
|
||||
### M Series: Memory
|
||||
|
||||
The M Series provides resources for memory intensive
|
||||
applications.
|
||||
|
||||
*M* is the abbreviation of "Memory".
|
||||
|
||||
### M Series Characteristics
|
||||
#### M Series Characteristics
|
||||
|
||||
Specific characteristics of this series are:
|
||||
- *Hugepages* - Hugepages are used in order to improve memory
|
||||
@@ -156,7 +279,7 @@ Specific characteristics of this series are:
|
||||
- *vCPU-To-Memory Ratio (1:8)* - A vCPU-to-Memory ratio of 1:8, for much
|
||||
less noise per node.
|
||||
|
||||
## RT Series
|
||||
### RT Series: RealTime
|
||||
|
||||
The RT Series provides resources for realtime applications, like Oslat.
|
||||
|
||||
@@ -165,7 +288,7 @@ The RT Series provides resources for realtime applications, like Oslat.
|
||||
This series of instance types requires nodes capable of running
|
||||
realtime applications.
|
||||
|
||||
### RT Series Characteristics
|
||||
#### RT Series Characteristics
|
||||
|
||||
Specific characteristics of this series are:
|
||||
- *Hugepages* - Hugepages are used in order to improve memory
|
||||
@@ -179,57 +302,3 @@ Specific characteristics of this series are:
|
||||
- *vCPU-To-Memory Ratio (1:4)* - A vCPU-to-Memory ratio of 1:4 starting from
|
||||
the medium size.
|
||||
|
||||
## Resources
|
||||
|
||||
The following instancetype resources are provided by Cozystack:
|
||||
|
||||
Name | vCPUs | Memory
|
||||
-----|-------|-------
|
||||
cx1.2xlarge | 8 | 16Gi
|
||||
cx1.4xlarge | 16 | 32Gi
|
||||
cx1.8xlarge | 32 | 64Gi
|
||||
cx1.large | 2 | 4Gi
|
||||
cx1.medium | 1 | 2Gi
|
||||
cx1.xlarge | 4 | 8Gi
|
||||
gn1.2xlarge | 8 | 32Gi
|
||||
gn1.4xlarge | 16 | 64Gi
|
||||
gn1.8xlarge | 32 | 128Gi
|
||||
gn1.xlarge | 4 | 16Gi
|
||||
m1.2xlarge | 8 | 64Gi
|
||||
m1.4xlarge | 16 | 128Gi
|
||||
m1.8xlarge | 32 | 256Gi
|
||||
m1.large | 2 | 16Gi
|
||||
m1.xlarge | 4 | 32Gi
|
||||
n1.2xlarge | 16 | 32Gi
|
||||
n1.4xlarge | 32 | 64Gi
|
||||
n1.8xlarge | 64 | 128Gi
|
||||
n1.large | 4 | 8Gi
|
||||
n1.medium | 4 | 4Gi
|
||||
n1.xlarge | 8 | 16Gi
|
||||
o1.2xlarge | 8 | 32Gi
|
||||
o1.4xlarge | 16 | 64Gi
|
||||
o1.8xlarge | 32 | 128Gi
|
||||
o1.large | 2 | 8Gi
|
||||
o1.medium | 1 | 4Gi
|
||||
o1.micro | 1 | 1Gi
|
||||
o1.nano | 1 | 512Mi
|
||||
o1.small | 1 | 2Gi
|
||||
o1.xlarge | 4 | 16Gi
|
||||
rt1.2xlarge | 8 | 32Gi
|
||||
rt1.4xlarge | 16 | 64Gi
|
||||
rt1.8xlarge | 32 | 128Gi
|
||||
rt1.large | 2 | 8Gi
|
||||
rt1.medium | 1 | 4Gi
|
||||
rt1.micro | 1 | 1Gi
|
||||
rt1.small | 1 | 2Gi
|
||||
rt1.xlarge | 4 | 16Gi
|
||||
u1.2xlarge | 8 | 32Gi
|
||||
u1.2xmedium | 2 | 4Gi
|
||||
u1.4xlarge | 16 | 64Gi
|
||||
u1.8xlarge | 32 | 128Gi
|
||||
u1.large | 2 | 8Gi
|
||||
u1.medium | 1 | 4Gi
|
||||
u1.micro | 1 | 1Gi
|
||||
u1.nano | 1 | 512Mi
|
||||
u1.small | 1 | 2Gi
|
||||
u1.xlarge | 4 | 16Gi
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.20.1@sha256:720148128917fa10f860a8b7e74f9428de72481c466c880c5ad894e1f0026d43
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.23.1@sha256:7315850634728a5864a3de3150c12f0e1454f3f1ce33cdf21a278f57611dd5e9
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.20.1@sha256:1b48a4725a33ccb48604bb2e1be3171271e7daac2726d3119228212d8a9da5bb
|
||||
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.23.1@sha256:6962bdf51ab2ff40b420b9cff7c850aeea02187da2a65a67f10e0471744649d7
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.20.1@sha256:fb6d3ce9d6d948285a6d399c852e15259d6922162ec7c44177d2274243f59d1f
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.23.1@sha256:b1525163cd21938ac934bb1b860f2f3151464fa463b82880ab058167aeaf3e29
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:184b81529ae72684279799b12f436cc7a511d8ff5bd1e9a30478799c7707c625
|
||||
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:290264eba144c5c58f68009b17f59a5990f6fafbf768f9cbefd7050c34733930
|
||||
|
||||
@@ -31,6 +31,16 @@ spec:
|
||||
{{- end }}
|
||||
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }}
|
||||
spec:
|
||||
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
|
||||
{{- if $configMap }}
|
||||
{{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
|
||||
{{- if $rawConstraints }}
|
||||
{{- $rawConstraints | fromYaml | toYaml | nindent 10 }}
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
domain:
|
||||
{{- if and .group.resources .group.resources.cpu }}
|
||||
cpu:
|
||||
@@ -201,12 +211,25 @@ spec:
|
||||
- ["LABEL=ephemeral", "/ephemeral"]
|
||||
- ["/ephemeral/kubelet", "/var/lib/kubelet", "none", "bind,nofail"]
|
||||
- ["/ephemeral/containerd", "/var/lib/containerd", "none", "bind,nofail"]
|
||||
{{- $sec := lookup "v1" "Secret" $.Release.Namespace (printf "%s-patch-containerd" $.Release.Name) }}
|
||||
{{- if $sec }}
|
||||
files:
|
||||
{{- range $key, $_ := $sec.data }}
|
||||
- path: /etc/containerd/certs.d/{{ trimSuffix ".toml" $key }}/hosts.toml
|
||||
contentFrom:
|
||||
secret:
|
||||
name: {{ $.Release.Name }}-patch-containerd
|
||||
key: {{ $key }}
|
||||
permissions: "0400"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
preKubeadmCommands:
|
||||
- sed -i 's|root:x:|root::|' /etc/passwd
|
||||
- systemctl stop containerd.service
|
||||
- mkdir -p /ephemeral/kubelet /ephemeral/containerd
|
||||
- mount -o bind /ephemeral/kubelet /var/lib/kubelet
|
||||
- mount -o bind /ephemeral/containerd /var/lib/containerd
|
||||
- sudo sed -i '/\[plugins."io.containerd.grpc.v1.cri".registry\]/,/^\[/ s|^\(\s*config_path\s*=\s*\).*|\1"/etc/containerd/certs.d"|' /etc/containerd/config.toml
|
||||
- systemctl start containerd.service
|
||||
joinConfiguration:
|
||||
nodeRegistration:
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
{{- if not .Values.useCustomSecretForPatchContainerd }}
|
||||
{{- $sourceSecret := lookup "v1" "Secret" "cozy-system" "patch-containerd" }}
|
||||
{{- if $sourceSecret }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-patch-containerd
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: {{ $sourceSecret.type }}
|
||||
data:
|
||||
{{- range $key, $value := $sourceSecret.data }}
|
||||
{{ printf "%s: %s" $key ($value | quote) | indent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
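Taken together, the two hunks above mean that each key of the `patch-containerd` Secret in `cozy-system` (or of a custom `<release>-patch-containerd` Secret when `useCustomSecretForPatchContainerd` is set) ends up on the nodes as `/etc/containerd/certs.d/<registry>/hosts.toml`. A hedged sketch of such a source Secret follows; the registry and mirror URL are examples only.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: patch-containerd            # copied into the tenant chart as <release>-patch-containerd
  namespace: cozy-system
type: Opaque
stringData:
  docker.io.toml: |                 # key "<registry>.toml" -> /etc/containerd/certs.d/<registry>/hosts.toml
    server = "https://registry-1.docker.io"

    [host."https://mirror.example.com"]
      capabilities = ["pull", "resolve"]
```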
|
||||
@@ -17,6 +17,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '>= 0.0.0-0'
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
{{- define "cozystack.defaultVPAValues" -}}
|
||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
||||
{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
|
||||
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
|
||||
{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
|
||||
vertical-pod-autoscaler:
|
||||
@@ -13,7 +15,7 @@ vertical-pod-autoscaler:
|
||||
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
|
||||
pod-name-label: pod
|
||||
pod-namespace-label: namespace
|
||||
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
|
||||
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.{{ $clusterDomain }}:8481/select/0/prometheus/
|
||||
prometheus-cadvisor-job-name: cadvisor
|
||||
resources:
|
||||
limits:
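The hunk above stops hard-coding the `cozy.local` suffix and instead reads `cluster-domain` from the `cozystack` ConfigMap. A minimal sketch of that ConfigMap with an illustrative domain (other keys omitted):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  cluster-domain: example.local     # the template falls back to "cozy.local" when this key is absent
```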
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"properties": {
|
||||
"host": {
|
||||
"type": "string",
|
||||
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
|
||||
"description": "Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty.",
|
||||
"default": ""
|
||||
},
|
||||
"controlPlane": {
|
||||
@@ -12,15 +12,20 @@
|
||||
"properties": {
|
||||
"replicas": {
|
||||
"type": "number",
|
||||
"description": "Number of replicas for Kubernetes control-plane components",
|
||||
"description": "Number of replicas for Kubernetes control-plane components.",
|
||||
"default": 2
|
||||
},
|
||||
"apiServer": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Explicit CPU/memory resource requests and limits for the API server.",
|
||||
"default": {}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"description": "Use a common resources preset when `resources` is not set explicitly.",
|
||||
"default": "small",
|
||||
"enum": [
|
||||
"none",
|
||||
@@ -32,11 +37,6 @@
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -45,12 +45,12 @@
|
||||
"properties": {
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"description": "Explicit CPU/memory resource requests and limits for the controller manager.",
|
||||
"default": {}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"description": "Use a common resources preset when `resources` is not set explicitly.",
|
||||
"default": "micro",
|
||||
"enum": [
|
||||
"none",
|
||||
@@ -68,9 +68,14 @@
|
||||
"scheduler": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Explicit CPU/memory resource requests and limits for the scheduler.",
|
||||
"default": {}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"description": "Use a common resources preset when `resources` is not set explicitly.",
|
||||
"default": "micro",
|
||||
"enum": [
|
||||
"none",
|
||||
@@ -82,11 +87,6 @@
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -96,9 +96,14 @@
|
||||
"server": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Explicit CPU/memory resource requests and limits for the Konnectivity.",
|
||||
"default": {}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"type": "string",
|
||||
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||
"description": "Use a common resources preset when `resources` is not set explicitly.",
|
||||
"default": "micro",
|
||||
"enum": [
|
||||
"none",
|
||||
@@ -110,11 +115,6 @@
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"description": "Resources",
|
||||
"default": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -124,9 +124,14 @@
|
||||
},
|
||||
"storageClass": {
|
||||
"type": "string",
|
||||
"description": "StorageClass used to store user data",
|
||||
"description": "StorageClass used to store user data.",
|
||||
"default": "replicated"
|
||||
},
|
||||
"useCustomSecretForPatchContainerd": {
|
||||
"type": "boolean",
|
||||
"description": "if true, for patch containerd will be used secret: {{ .Release.Name }}-patch-containerd",
|
||||
"default": false
|
||||
},
|
||||
"addons": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -135,7 +140,7 @@
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables the cert-manager",
|
||||
"description": "Enable cert-manager, which automatically creates and manages SSL/TLS certificates.",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
@@ -160,7 +165,7 @@
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables the Gateway API",
|
||||
"description": "Enable the Gateway API",
|
||||
"default": false
|
||||
}
|
||||
}
|
||||
@@ -170,7 +175,7 @@
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
|
||||
"description": "Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role).",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
@@ -180,7 +185,7 @@
|
||||
},
|
||||
"hosts": {
|
||||
"type": "array",
|
||||
"description": "List of domain names that should be passed through to the cluster by upper cluster",
|
||||
"description": "List of domain names that the parent cluster should route to this tenant cluster.",
|
||||
"default": [],
|
||||
"items": {}
|
||||
}
|
||||
@@ -191,7 +196,7 @@
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables the gpu-operator",
|
||||
"description": "Enable the GPU-operator",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
@@ -206,7 +211,7 @@
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables Flux CD",
|
||||
"description": "Enable FluxCD",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
@@ -221,7 +226,7 @@
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
|
||||
"description": "Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage.",
|
||||
"default": false
|
||||
},
|
||||
"valuesOverride": {
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
## @section Common parameters
|
||||
## @section Common Parameters
|
||||
|
||||
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
|
||||
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
|
||||
## @param storageClass StorageClass used to store user data
|
||||
## @param host Hostname used to access the Kubernetes cluster externally. Defaults to `<cluster-name>.<tenant-host>` when empty.
|
||||
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components.
|
||||
## @param storageClass StorageClass used to store user data.
|
||||
## @param useCustomSecretForPatchContainerd if true, for patch containerd will be used secret: {{ .Release.Name }}-patch-containerd
|
||||
##
|
||||
host: ""
|
||||
storageClass: replicated
|
||||
useCustomSecretForPatchContainerd: false
|
||||
|
||||
## @param nodeGroups [object] nodeGroups configuration
|
||||
##
|
||||
@@ -37,7 +39,7 @@ addons:
|
||||
## Cert-manager: automatically creates and manages SSL/TLS certificate
|
||||
##
|
||||
certManager:
|
||||
## @param addons.certManager.enabled Enables the cert-manager
|
||||
## @param addons.certManager.enabled Enable cert-manager, which automatically creates and manages SSL/TLS certificates.
|
||||
## @param addons.certManager.valuesOverride Custom values to override
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
@@ -51,17 +53,17 @@ addons:
|
||||
## Gateway API
|
||||
##
|
||||
gatewayAPI:
|
||||
## @param addons.gatewayAPI.enabled Enables the Gateway API
|
||||
## @param addons.gatewayAPI.enabled Enable the Gateway API
|
||||
enabled: false
|
||||
|
||||
## Ingress-NGINX Controller
|
||||
##
|
||||
ingressNginx:
|
||||
## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
|
||||
## @param addons.ingressNginx.enabled Enable the Ingress-NGINX controller (requires nodes labeled with the 'ingress-nginx' role).
|
||||
## @param addons.ingressNginx.valuesOverride Custom values to override
|
||||
##
|
||||
enabled: false
|
||||
## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
|
||||
## @param addons.ingressNginx.hosts List of domain names that the parent cluster should route to this tenant cluster.
|
||||
## e.g:
|
||||
## hosts:
|
||||
## - example.org
|
||||
@@ -73,7 +75,7 @@ addons:
|
||||
## GPU-operator: NVIDIA GPU Operator
|
||||
##
|
||||
gpuOperator:
|
||||
## @param addons.gpuOperator.enabled Enables the gpu-operator
|
||||
## @param addons.gpuOperator.enabled Enable the GPU-operator
|
||||
## @param addons.gpuOperator.valuesOverride Custom values to override
|
||||
enabled: false
|
||||
valuesOverride: {}
|
||||
@@ -81,7 +83,7 @@ addons:
|
||||
## Flux CD
|
||||
##
|
||||
fluxcd:
|
||||
## @param addons.fluxcd.enabled Enables Flux CD
|
||||
## @param addons.fluxcd.enabled Enable FluxCD
|
||||
## @param addons.fluxcd.valuesOverride Custom values to override
|
||||
##
|
||||
enabled: false
|
||||
@@ -90,7 +92,7 @@ addons:
|
||||
## MonitoringAgents
|
||||
##
|
||||
monitoringAgents:
|
||||
## @param addons.monitoringAgents.enabled Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage
|
||||
## @param addons.monitoringAgents.enabled Enable monitoring agents (Fluent Bit and VMAgents) to send logs and metrics. If tenant monitoring is enabled, data is sent to tenant storage; otherwise, it goes to root storage.
|
||||
## @param addons.monitoringAgents.valuesOverride Custom values to override
|
||||
##
|
||||
enabled: false
|
||||
@@ -103,15 +105,15 @@ addons:
|
||||
##
|
||||
valuesOverride: {}
|
||||
|
||||
## @section Kubernetes control plane configuration
|
||||
## @section Kubernetes Control Plane Configuration
|
||||
##
|
||||
|
||||
controlPlane:
|
||||
replicas: 2
|
||||
|
||||
apiServer:
|
||||
## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.apiServer.resources Resources
|
||||
## @param controlPlane.apiServer.resources Explicit CPU/memory resource requests and limits for the API server.
|
||||
## @param controlPlane.apiServer.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
|
||||
## e.g:
|
||||
## resources:
|
||||
## limits:
|
||||
@@ -125,20 +127,20 @@ controlPlane:
|
||||
resources: {}
|
||||
|
||||
controllerManager:
|
||||
## @param controlPlane.controllerManager.resources Resources
|
||||
## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.controllerManager.resources Explicit CPU/memory resource requests and limits for the controller manager.
|
||||
## @param controlPlane.controllerManager.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
|
||||
resourcesPreset: "micro"
|
||||
resources: {}
|
||||
|
||||
scheduler:
|
||||
## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.scheduler.resources Resources
|
||||
## @param controlPlane.scheduler.resources Explicit CPU/memory resource requests and limits for the scheduler.
|
||||
## @param controlPlane.scheduler.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
|
||||
resourcesPreset: "micro"
|
||||
resources: {}
|
||||
|
||||
konnectivity:
|
||||
server:
|
||||
## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||
## @param controlPlane.konnectivity.server.resources Resources
|
||||
## @param controlPlane.konnectivity.server.resources Explicit CPU/memory resource requests and limits for the Konnectivity.
|
||||
## @param controlPlane.konnectivity.server.resourcesPreset Use a common resources preset when `resources` is not set explicitly.
|
||||
resourcesPreset: "micro"
|
||||
resources: {}
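Since explicit `resources` take precedence over `resourcesPreset` in these values, a hypothetical override for the control plane might look as follows; the request and limit figures are illustrative only.

```yaml
controlPlane:
  replicas: 2
  apiServer:
    resources:                      # explicit values win over the preset
      requests:
        cpu: 500m
        memory: 1Gi
      limits:
        cpu: "2"
        memory: 2Gi
  scheduler:
    resourcesPreset: micro          # still preset-driven, no explicit resources set
```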
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.7.0
|
||||
version: 0.7.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/mysql/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/mariadb-backup:0.7.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
|
||||
ghcr.io/cozystack/cozystack/mariadb-backup:0.7.1@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
|
||||
|
||||
@@ -57,6 +57,11 @@ spec:
|
||||
name: {{ .Release.Name }}-my-cnf
|
||||
key: config
|
||||
|
||||
service:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
|
||||
storage:
|
||||
size: {{ .Values.size }}
|
||||
resizeInUseVolumes: true
|
||||
@@ -74,7 +79,7 @@ spec:
|
||||
# type: LoadBalancer
|
||||
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.6.0
|
||||
version: 0.6.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/nats/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -1,3 +1,5 @@
|
||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
||||
{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
|
||||
{{- $passwords := dict }}
|
||||
{{- range $user, $u := .Values.users }}
|
||||
{{- if $u.password }}
|
||||
@@ -45,12 +47,15 @@ spec:
|
||||
- name: nats
|
||||
image: nats:2.10.17-alpine
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 22 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 22 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 22 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 22 }}
|
||||
{{- end }}
|
||||
fullnameOverride: {{ .Release.Name }}
|
||||
config:
|
||||
cluster:
|
||||
routeURLs:
|
||||
k8sClusterDomain: {{ $clusterDomain }}
|
||||
{{- if or (gt (len $passwords) 0) (gt (len .Values.config.merge) 0) }}
|
||||
merge:
|
||||
{{- if gt (len $passwords) 0 }}
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.11.0
|
||||
version: 0.12.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/postgres/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/postgres-backup:0.11.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
|
||||
ghcr.io/cozystack/cozystack/postgres-backup:0.12.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
|
||||
|
||||
@@ -6,9 +6,9 @@ metadata:
|
||||
spec:
|
||||
instances: {{ .Values.replicas }}
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
enableSuperuserAccess: true
|
||||
@@ -17,6 +17,9 @@ spec:
|
||||
{{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
|
||||
{{- if $rawConstraints }}
|
||||
{{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
cnpg.io/cluster: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
postgresql:
|
||||
@@ -41,6 +44,8 @@ spec:
|
||||
inheritedMetadata:
|
||||
labels:
|
||||
policy.cozystack.io/allow-to-apiserver: "true"
|
||||
app.kubernetes.io/name: postgres.apps.cozystack.io
|
||||
app.kubernets.io/instance: {{ $.Release.Name }}
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
@@ -52,6 +57,6 @@ spec:
|
||||
kind: postgres
|
||||
type: postgres
|
||||
selector:
|
||||
cnpg.io/cluster: {{ .Release.Name }}
|
||||
cnpg.io/podRole: instance
|
||||
app.kubernetes.io/name: postgres.apps.cozystack.io
|
||||
app.kubernets.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
1
packages/apps/rabbitmq/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -12,9 +12,9 @@ spec:
|
||||
type: LoadBalancer
|
||||
{{- end }}
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
|
||||
{{- end }}
|
||||
override:
|
||||
statefulSet:
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.7.0
|
||||
version: 0.7.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/redis/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -26,22 +26,25 @@ spec:
|
||||
sentinel:
|
||||
replicas: 3
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 6 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 6 }}
|
||||
{{- end }}
|
||||
redis:
|
||||
replicas: {{ .Values.replicas }}
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 6 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.size }}
|
||||
storage:
|
||||
persistentVolumeClaim:
|
||||
metadata:
|
||||
name: redisfailover-persistent-data
|
||||
labels:
|
||||
app.kubernetes.io/component: redis
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.4.0
|
||||
version: 0.4.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/tcp-balancer/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -15,6 +15,7 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ .Release.Name }}-haproxy
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
spec:
|
||||
@@ -34,9 +35,9 @@ spec:
|
||||
- image: haproxy:latest
|
||||
name: haproxy
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 10 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 10 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
{{- with .Values.httpAndHttps }}
|
||||
|
||||
13
packages/apps/tcp-balancer/templates/workloadmonitor.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
minReplicas: 1
|
||||
kind: tcp-balancer
|
||||
type: haproxy
|
||||
selector:
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
@@ -4,4 +4,4 @@ description: Separated tenant namespace
|
||||
icon: /logos/tenant.svg
|
||||
|
||||
type: application
|
||||
version: 1.9.2
|
||||
version: 1.9.3
|
||||
|
||||
@@ -184,6 +184,12 @@ rules:
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups: ["subresources.kubevirt.io"]
|
||||
resources:
|
||||
- virtualmachineinstances/portforward
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
@@ -253,6 +259,12 @@ rules:
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups: ["subresources.kubevirt.io"]
|
||||
resources:
|
||||
- virtualmachineinstances/portforward
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups: ["apps.cozystack.io"]
|
||||
resources:
|
||||
- buckets
|
||||
@@ -349,6 +361,12 @@ rules:
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups: ["subresources.kubevirt.io"]
|
||||
resources:
|
||||
- virtualmachineinstances/portforward
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups: ["apps.cozystack.io"]
|
||||
resources:
|
||||
- '*'
|
||||
|
||||
@@ -9,7 +9,8 @@ clickhouse 0.6.0 1ec10165
|
||||
clickhouse 0.6.1 c62a83a7
|
||||
clickhouse 0.6.2 8267072d
|
||||
clickhouse 0.7.0 93bdf411
|
||||
clickhouse 0.9.0 HEAD
|
||||
clickhouse 0.9.0 6130f43d
|
||||
clickhouse 0.9.2 HEAD
|
||||
ferretdb 0.1.0 e9716091
|
||||
ferretdb 0.1.1 91b0499a
|
||||
ferretdb 0.2.0 6c5cf5bf
|
||||
@@ -18,13 +19,15 @@ ferretdb 0.4.0 b40e1b09
|
||||
ferretdb 0.4.1 1ec10165
|
||||
ferretdb 0.4.2 8267072d
|
||||
ferretdb 0.5.0 93bdf411
|
||||
ferretdb 0.6.0 HEAD
|
||||
ferretdb 0.6.0 6130f43d
|
||||
ferretdb 0.6.1 HEAD
|
||||
http-cache 0.1.0 263e47be
|
||||
http-cache 0.2.0 53f2365e
|
||||
http-cache 0.3.0 6c5cf5bf
|
||||
http-cache 0.3.1 0f312d5c
|
||||
http-cache 0.4.0 93bdf411
|
||||
http-cache 0.5.0 HEAD
|
||||
http-cache 0.5.0 6130f43d
|
||||
http-cache 0.5.1 HEAD
|
||||
kafka 0.1.0 f7eaab0a
|
||||
kafka 0.2.0 c0685f43
|
||||
kafka 0.2.1 dfbc210b
|
||||
@@ -36,7 +39,8 @@ kafka 0.3.2 93c46161
|
||||
kafka 0.3.3 8267072d
|
||||
kafka 0.4.0 85ec09b8
|
||||
kafka 0.5.0 93bdf411
|
||||
kafka 0.6.0 HEAD
|
||||
kafka 0.6.0 6130f43d
|
||||
kafka 0.6.1 HEAD
|
||||
kubernetes 0.1.0 263e47be
|
||||
kubernetes 0.2.0 53f2365e
|
||||
kubernetes 0.3.0 007d414f
|
||||
@@ -65,7 +69,9 @@ kubernetes 0.17.1 fd240701
|
||||
kubernetes 0.18.0 721c12a7
|
||||
kubernetes 0.19.0 93bdf411
|
||||
kubernetes 0.20.0 609e7ede
|
||||
kubernetes 0.20.1 HEAD
|
||||
kubernetes 0.20.1 f9f8bb2f
|
||||
kubernetes 0.21.0 6130f43d
|
||||
kubernetes 0.23.1 HEAD
|
||||
mysql 0.1.0 263e47be
|
||||
mysql 0.2.0 c24a103f
|
||||
mysql 0.3.0 53f2365e
|
||||
@@ -75,7 +81,8 @@ mysql 0.5.1 0f312d5c
|
||||
mysql 0.5.2 1ec10165
|
||||
mysql 0.5.3 8267072d
|
||||
mysql 0.6.0 93bdf411
|
||||
mysql 0.7.0 HEAD
|
||||
mysql 0.7.0 6130f43d
|
||||
mysql 0.7.1 HEAD
|
||||
nats 0.1.0 e9716091
|
||||
nats 0.2.0 6c5cf5bf
|
||||
nats 0.3.0 78366f19
|
||||
@@ -83,7 +90,8 @@ nats 0.3.1 c62a83a7
|
||||
nats 0.4.0 898374b5
|
||||
nats 0.4.1 8267072d
|
||||
nats 0.5.0 93bdf411
|
||||
nats 0.6.0 HEAD
|
||||
nats 0.6.0 6130f43d
|
||||
nats 0.6.1 HEAD
|
||||
postgres 0.1.0 263e47be
|
||||
postgres 0.2.0 53f2365e
|
||||
postgres 0.2.1 d7cfa53c
|
||||
@@ -99,7 +107,9 @@ postgres 0.8.0 4e68e65c
|
||||
postgres 0.9.0 8267072d
|
||||
postgres 0.10.0 721c12a7
|
||||
postgres 0.10.1 93bdf411
|
||||
postgres 0.11.0 HEAD
|
||||
postgres 0.11.0 f9f8bb2f
|
||||
postgres 0.12.0 6130f43d
|
||||
postgres 0.12.1 HEAD
|
||||
rabbitmq 0.1.0 263e47be
|
||||
rabbitmq 0.2.0 53f2365e
|
||||
rabbitmq 0.3.0 6c5cf5bf
|
||||
@@ -117,11 +127,13 @@ redis 0.3.1 c62a83a7
|
||||
redis 0.4.0 84f3ccc0
|
||||
redis 0.5.0 4e68e65c
|
||||
redis 0.6.0 93bdf411
|
||||
redis 0.7.0 HEAD
|
||||
redis 0.7.0 6130f43d
|
||||
redis 0.7.1 HEAD
|
||||
tcp-balancer 0.1.0 263e47be
|
||||
tcp-balancer 0.2.0 53f2365e
|
||||
tcp-balancer 0.3.0 93bdf411
|
||||
tcp-balancer 0.4.0 HEAD
|
||||
tcp-balancer 0.4.0 6130f43d
|
||||
tcp-balancer 0.4.1 HEAD
|
||||
tenant 0.1.4 afc997ef
|
||||
tenant 0.1.5 e3ab858a
|
||||
tenant 1.0.0 263e47be
|
||||
@@ -144,7 +156,8 @@ tenant 1.7.0 24fa7222
|
||||
tenant 1.8.0 160e4e2a
|
||||
tenant 1.9.0 728743db
|
||||
tenant 1.9.1 721c12a7
|
||||
tenant 1.9.2 HEAD
|
||||
tenant 1.9.2 6130f43d
|
||||
tenant 1.9.3 HEAD
|
||||
virtual-machine 0.1.4 f2015d65
|
||||
virtual-machine 0.1.5 263e47be
|
||||
virtual-machine 0.2.0 c0685f43
|
||||
@@ -159,9 +172,11 @@ virtual-machine 0.8.1 93c46161
|
||||
virtual-machine 0.8.2 de19450f
|
||||
virtual-machine 0.9.0 721c12a7
|
||||
virtual-machine 0.9.1 93bdf411
|
||||
virtual-machine 0.9.2 HEAD
|
||||
virtual-machine 0.10.0 6130f43d
|
||||
virtual-machine 0.10.2 HEAD
|
||||
vm-disk 0.1.0 d971f2ff
|
||||
vm-disk 0.1.1 HEAD
|
||||
vm-disk 0.1.1 6130f43d
|
||||
vm-disk 0.1.2 HEAD
|
||||
vm-instance 0.1.0 1ec10165
|
||||
vm-instance 0.2.0 84f3ccc0
|
||||
vm-instance 0.3.0 4e68e65c
|
||||
@@ -170,10 +185,12 @@ vm-instance 0.4.1 0ab39f20
|
||||
vm-instance 0.5.0 3fa4dd3a
|
||||
vm-instance 0.5.1 de19450f
|
||||
vm-instance 0.6.0 721c12a7
|
||||
vm-instance 0.6.1 HEAD
|
||||
vm-instance 0.7.0 6130f43d
|
||||
vm-instance 0.7.2 HEAD
|
||||
vpn 0.1.0 263e47be
|
||||
vpn 0.2.0 53f2365e
|
||||
vpn 0.3.0 6c5cf5bf
|
||||
vpn 0.3.1 1ec10165
|
||||
vpn 0.4.0 93bdf411
|
||||
vpn 0.5.0 HEAD
|
||||
vpn 0.5.0 6130f43d
|
||||
vpn 0.5.1 HEAD
|
||||
|
||||
@@ -17,10 +17,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.9.2
|
||||
version: 0.10.2
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: 0.9.2
|
||||
appVersion: 0.10.0
|
||||
|
||||
@@ -9,4 +9,4 @@ generate:
|
||||
&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
|
||||
yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
|
||||
yq -i -o json '.properties.systemDisk.properties.image.enum = ["ubuntu", "cirros", "alpine", "fedora", "talos"]' values.schema.json
|
||||
yq -i -o json '.properties.externalMethod.enum = ["WholeIP", "PortList"]' values.schema.json
|
||||
yq -i -o json '.properties.externalMethod.enum = ["PortList", "WholeIP"]' values.schema.json
|
||||
|
||||
@@ -39,7 +39,7 @@ virtctl ssh <user>@<vm>
|
||||
| Name | Description | Value |
|
||||
| ------------------------- | ---------------------------------------------------------------------------------------------------------- | ------------ |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `PortList` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
|
||||
@@ -21,5 +21,5 @@ spec:
|
||||
kind: virtual-machine
|
||||
type: virtual-machine
|
||||
selector:
|
||||
vm.kubevirt.io/name: {{ .Release.Name }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
@@ -17,7 +17,7 @@ spec:
|
||||
selector:
|
||||
{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
|
||||
ports:
|
||||
{{- if eq .Values.externalMethod "WholeIP" }}
|
||||
{{- if and (eq .Values.externalMethod "WholeIP") (not .Values.externalPorts) }}
|
||||
- port: 65535
|
||||
{{- else }}
|
||||
{{- range .Values.externalPorts }}
|
||||
|
||||
@@ -13,6 +13,7 @@ metadata:
|
||||
{{- include "virtual-machine.labels" . | nindent 4 }}
|
||||
spec:
|
||||
running: {{ .Values.running | default "true" }}
|
||||
|
||||
{{- with .Values.instanceType }}
|
||||
instancetype:
|
||||
kind: VirtualMachineClusterInstancetype
|
||||
@@ -23,9 +24,12 @@ spec:
|
||||
kind: VirtualMachineClusterPreference
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
|
||||
dataVolumeTemplates:
|
||||
- metadata:
|
||||
name: {{ include "virtual-machine.fullname" . }}
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
storage:
|
||||
resources:
|
||||
@@ -75,21 +79,25 @@ spec:
|
||||
deviceName: {{ $gpu.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
disks:
|
||||
- disk:
|
||||
bus: scsi
|
||||
name: systemdisk
|
||||
{{- if or .Values.sshKeys .Values.cloudInit }}
|
||||
{{- if .Values.sshKeys }}
|
||||
- disk:
|
||||
bus: virtio
|
||||
name: cloudinitdisk
|
||||
{{- end }}
|
||||
|
||||
interfaces:
|
||||
- name: default
|
||||
bridge: {}
|
||||
|
||||
machine:
|
||||
type: ""
|
||||
{{- with .Values.sshKeys }}
|
||||
|
||||
{{- if .Values.sshKeys }}
|
||||
accessCredentials:
|
||||
- sshPublicKey:
|
||||
source:
|
||||
@@ -99,23 +107,37 @@ spec:
|
||||
# keys will be injected into metadata part of cloud-init disk
|
||||
noCloud: {}
|
||||
{{- end }}
|
||||
|
||||
terminationGracePeriodSeconds: 30
|
||||
|
||||
volumes:
|
||||
- name: systemdisk
|
||||
dataVolume:
|
||||
name: {{ include "virtual-machine.fullname" . }}
|
||||
{{- if or .Values.sshKeys .Values.cloudInit }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
{{- if .Values.cloudInit }}
|
||||
secretRef:
|
||||
name: {{ include "virtual-machine.fullname" . }}-cloud-init
|
||||
- name: systemdisk
|
||||
dataVolume:
|
||||
name: {{ include "virtual-machine.fullname" . }}
|
||||
|
||||
{{- if and .Values.sshKeys .Values.cloudInit }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
secretRef:
|
||||
name: {{ include "virtual-machine.fullname" . }}-cloud-init
|
||||
{{- else if .Values.sshKeys }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
userData: |
|
||||
{{ printf "%s" "#cloud-config" }}
|
||||
ssh_authorized_keys:
|
||||
{{- range .Values.sshKeys }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
chpasswd:
|
||||
expire: false
|
||||
{{- else }}
|
||||
userData: |
|
||||
#cloud-config
|
||||
final_message: Cloud-init user-data was left blank intentionally.
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
userData: |
|
||||
{{ printf "%s" "#cloud-config" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
networks:
|
||||
- name: default
|
||||
pod: {}
|
||||
|
||||
@@ -10,10 +10,10 @@
|
||||
"externalMethod": {
|
||||
"type": "string",
|
||||
"description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
|
||||
"default": "WholeIP",
|
||||
"default": "PortList",
|
||||
"enum": [
|
||||
"WholeIP",
|
||||
"PortList"
|
||||
"PortList",
|
||||
"WholeIP"
|
||||
]
|
||||
},
|
||||
"externalPorts": {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
|
||||
## @param externalPorts [array] Specify ports to forward from outside the cluster
|
||||
external: false
|
||||
externalMethod: WholeIP
|
||||
externalMethod: PortList
|
||||
externalPorts:
|
||||
- 22
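With the default changed from `WholeIP` to `PortList`, a hypothetical values snippet for exposing a VM on selected ports could look like the sketch below (the port list is illustrative); setting `externalMethod: WholeIP` instead passes the whole address through to the machine.

```yaml
external: true
externalMethod: PortList
externalPorts:
  - 22
  - 80
```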
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.1
|
||||
version: 0.1.2
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
@@ -7,6 +7,8 @@ metadata:
|
||||
cdi.kubevirt.io/storage.bind.immediate.requested: ""
|
||||
{{- end }}
|
||||
vm-disk.cozystack.io/optical: "{{ .Values.optical }}"
|
||||
labels:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
name: {{ .Release.Name }}
|
||||
spec:
|
||||
{{- if $existingDV }}
|
||||
|
||||
12
packages/apps/vm-disk/templates/workloadmonitor.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: 0
|
||||
minReplicas: 0
|
||||
kind: vm-disk
|
||||
type: vm-disk
|
||||
selector:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
@@ -17,10 +17,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.6.1
|
||||
version: 0.7.2
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: 0.6.0
|
||||
appVersion: 0.7.0
|
||||
|
||||
@@ -9,4 +9,4 @@ generate:
|
||||
PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \
|
||||
&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
|
||||
yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
|
||||
yq -i -o json '.properties.externalMethod.enum = ["WholeIP", "PortList"]' values.schema.json
|
||||
yq -i -o json '.properties.externalMethod.enum = ["PortList", "WholeIP"]' values.schema.json
|
||||
|
||||
@@ -39,7 +39,7 @@ virtctl ssh <user>@<vm>
|
||||
| Name | Description | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ----------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `PortList` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
|
||||
@@ -22,5 +22,5 @@ spec:
|
||||
kind: virtual-machine
|
||||
type: virtual-machine
|
||||
selector:
|
||||
{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
@@ -17,7 +17,7 @@ spec:
|
||||
selector:
|
||||
{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
|
||||
ports:
|
||||
{{- if eq .Values.externalMethod "WholeIP" }}
|
||||
{{- if and (eq .Values.externalMethod "WholeIP") (not .Values.externalPorts) }}
|
||||
- port: 65535
|
||||
{{- else }}
|
||||
{{- range .Values.externalPorts }}
|
||||
|
||||
@@ -94,17 +94,27 @@ spec:
|
||||
dataVolume:
|
||||
name: vm-disk-{{ .name }}
|
||||
{{- end }}
|
||||
{{- if or .Values.sshKeys .Values.cloudInit }}
|
||||
{{- if and .Values.sshKeys .Values.cloudInit }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
{{- if .Values.cloudInit }}
|
||||
secretRef:
|
||||
name: {{ include "virtual-machine.fullname" . }}-cloud-init
|
||||
{{- else }}
|
||||
{{- else if .Values.sshKeys }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
userData: |
|
||||
#cloud-config
|
||||
final_message: Cloud-init user-data was left blank intentionally.
|
||||
{{- end }}
|
||||
{{ printf "%s" "#cloud-config" }}
|
||||
ssh_authorized_keys:
|
||||
{{- range .Values.sshKeys }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
chpasswd:
|
||||
expire: false
|
||||
{{- else }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
userData: |
|
||||
{{ printf "%s" "#cloud-config" }}
|
||||
{{- end }}
|
||||
networks:
|
||||
- name: default
|
||||
|
||||
@@ -10,10 +10,10 @@
|
||||
"externalMethod": {
|
||||
"type": "string",
|
||||
"description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
|
||||
"default": "WholeIP",
|
||||
"default": "PortList",
|
||||
"enum": [
|
||||
"WholeIP",
|
||||
"PortList"
|
||||
"PortList",
|
||||
"WholeIP"
|
||||
]
|
||||
},
|
||||
"externalPorts": {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
|
||||
## @param externalPorts [array] Specify ports to forward from outside the cluster
|
||||
external: false
|
||||
externalMethod: WholeIP
|
||||
externalMethod: PortList
|
||||
externalPorts:
|
||||
- 22
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.5.0
|
||||
version: 0.5.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
|
||||
1
packages/apps/vpn/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -14,6 +14,7 @@ spec:
|
||||
labels:
|
||||
app: {{ .Release.Name }}-vpn
|
||||
name: {{ .Release.Name }}-vpn
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
|
||||
spec:
|
||||
@@ -43,9 +44,9 @@ spec:
|
||||
- name: outline-vpn
|
||||
image: quay.io/outline/shadowbox:stable
|
||||
{{- if .Values.resources }}
|
||||
resources: {{- toYaml .Values.resources | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 12 }}
|
||||
{{- else if ne .Values.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||
resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: 40000
|
||||
|
||||
@@ -5,6 +5,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-vpn
|
||||
labels:
|
||||
app: {{ .Release.Name }}-vpn
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
{{- if .Values.externalIPs }}
|
||||
externalIPs:
|
||||
|
||||
12
packages/apps/vpn/templates/workloadmonitor.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
minReplicas: 1
|
||||
kind: vpn
|
||||
type: vpn
|
||||
selector:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
@@ -32,17 +32,36 @@ done
|
||||
|
||||
for profile in $PROFILES; do
|
||||
echo "writing profile images/talos/profiles/$profile.yaml"
|
||||
if [ "$profile" = "nocloud" ] || [ "$profile" = "metal" ]; then
|
||||
image_options="{ diskSize: 1306525696, diskFormat: raw }"
|
||||
out_format=".xz"
|
||||
platform="$profile"
|
||||
kind="image"
|
||||
else
|
||||
image_options="{}"
|
||||
out_format="raw"
|
||||
platform="metal"
|
||||
kind="$profile"
|
||||
fi
|
||||
case "$profile" in
|
||||
initramfs|kernel|iso)
|
||||
image_options="{}"
|
||||
out_format="raw"
|
||||
platform="metal"
|
||||
kind="$profile"
|
||||
;;
|
||||
installer)
|
||||
image_options="{}"
|
||||
out_format="raw"
|
||||
platform="metal"
|
||||
kind="installer"
|
||||
;;
|
||||
metal)
|
||||
image_options="{ diskSize: 1306525696, diskFormat: raw }"
|
||||
out_format=".xz"
|
||||
platform="metal"
|
||||
kind="image"
|
||||
;;
|
||||
nocloud)
|
||||
image_options="{ diskSize: 1306525696, diskFormat: raw }"
|
||||
out_format=".xz"
|
||||
platform="nocloud"
|
||||
kind="image"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown profile: $profile" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
cat > images/talos/profiles/$profile.yaml <<EOT
|
||||
# this file generated by hack/gen-profiles.sh
|
||||
@@ -57,12 +76,10 @@ input:
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:${TALOS_VERSION}
|
||||
imageRef: "ghcr.io/siderolabs/installer:v1.10.3"
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:${AMD_UCODE_VERSION}
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:${AMDGPU_FIRMWARE_VERSION}
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:${BNX2_BNX2X_VERSION}
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:${I915_UCODE_VERSION}
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:${INTEL_ICE_FIRMWARE_VERSION}
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:${INTEL_UCODE_VERSION}
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:${QLOGIC_FIRMWARE_VERSION}
|
||||
|
||||
@@ -3,24 +3,22 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.10.1
|
||||
version: v1.10.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.10.1
|
||||
imageRef: "ghcr.io/siderolabs/installer:v1.10.3"
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250410
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250509
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.1-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.2-v1.10.3
|
||||
output:
|
||||
kind: initramfs
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,22 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.10.1
|
||||
version: v1.10.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.10.1
|
||||
imageRef: "ghcr.io/siderolabs/installer:v1.10.3"
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250410
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250509
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.1-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.2-v1.10.3
|
||||
output:
|
||||
kind: installer
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,22 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.10.1
|
||||
version: v1.10.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.10.1
|
||||
imageRef: "ghcr.io/siderolabs/installer:v1.10.3"
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250410
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250509
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.1-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.2-v1.10.3
|
||||
output:
|
||||
kind: iso
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,22 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.10.1
|
||||
version: v1.10.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.10.1
|
||||
imageRef: "ghcr.io/siderolabs/installer:v1.10.3"
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250410
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250509
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.1-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.2-v1.10.3
|
||||
output:
|
||||
kind: kernel
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,22 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.10.1
|
||||
version: v1.10.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.10.1
|
||||
imageRef: "ghcr.io/siderolabs/installer:v1.10.3"
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250410
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250509
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250410
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.1-v1.10.1
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250509
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.13-v1.10.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.3.2-v1.10.3
|
||||
output:
|
||||
kind: image
|
||||
imageOptions: { diskSize: 1306525696, diskFormat: raw }
Some files were not shown because too many files have changed in this diff.