Compare commits


1 Commit

Author: Ahmad Murzahmatov
SHA1: ea2e369e24
Date: 2025-07-08 11:49:43 +06:00
Message: [feat] Hetzner

Add Hetzner CCM
Add Hetzner RobotLB

Signed-off-by: Ahmad Murzahmatov <gwynbleidd2106@yandex.com>
629 changed files with 60553 additions and 44623 deletions

View File

@@ -28,7 +28,16 @@ jobs:
- name: Install generate
run: |
curl -sSL https://github.com/cozystack/cozyvalues-gen/releases/download/v0.9.0/cozyvalues-gen-linux-amd64.tar.gz | tar -xzvf- -C /usr/local/bin/ cozyvalues-gen
sudo apt update
sudo apt install curl -y
sudo apt install nodejs -y
sudo apt install npm -y
git clone --branch 2.7.0 --depth 1 https://github.com/bitnami/readme-generator-for-helm.git
cd ./readme-generator-for-helm
npm install
npm install -g @yao-pkg/pkg
pkg . -o /usr/local/bin/readme-generator
- name: Run pre-commit hooks
run: |

View File

@@ -1,7 +1,5 @@
name: Pull Request
env:
REGISTRY: ${{ vars.OCIR_REPO }}
on:
pull_request:
types: [opened, synchronize, reopened]
@@ -32,19 +30,12 @@ jobs:
fetch-depth: 0
fetch-tags: true
- name: Set up Docker config
run: |
if [ -d ~/.docker ]; then
cp -r ~/.docker "${{ runner.temp }}/.docker"
fi
- name: Login to GitHub Container Registry
if: ${{ !github.event.pull_request.head.repo.fork }}
uses: docker/login-action@v3
with:
username: ${{ secrets.OCIR_USER}}
password: ${{ secrets.OCIR_TOKEN }}
registry: iad.ocir.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
env:
DOCKER_CONFIG: ${{ runner.temp }}/.docker
@@ -261,11 +252,6 @@ jobs:
done
echo "✅ The task completed successfully after $attempt attempts."
- name: Run OpenAPI tests
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-openapi
detect_test_matrix:
name: "Detect e2e test matrix"
runs-on: ubuntu-latest
@@ -276,12 +262,12 @@ jobs:
- uses: actions/checkout@v4
- id: set
run: |
apps=$(ls hack/e2e-apps/*.bats | cut -f3 -d/ | cut -f1 -d. | jq -R | jq -cs)
apps=$(find hack/e2e-apps -maxdepth 1 -mindepth 1 -name '*.bats' | \
awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
test_apps:
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
name: Test ${{ matrix.app }}
runs-on: [self-hosted]

View File

@@ -118,7 +118,6 @@ jobs:
git config user.name "cozystack-bot"
git config user.email "217169706+cozystack-bot@users.noreply.github.com"
git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
git config --unset-all http.https://github.com/.extraheader || true
git add .
git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
git push origin HEAD || true
@@ -149,35 +148,36 @@ jobs:
version: ${{ steps.tag.outputs.tag }} # A
compare-to: ${{ steps.latest_release.outputs.tag }} # B
# Create or reuse draft release
# Create or reuse DRAFT GitHub Release
- name: Create / reuse draft release
if: steps.check_release.outputs.skip == 'false'
id: release
uses: actions/github-script@v7
with:
script: |
const tag = '${{ steps.tag.outputs.tag }}';
const isRc = ${{ steps.tag.outputs.is_rc }};
const releases = await github.rest.repos.listReleases({
const tag = '${{ steps.tag.outputs.tag }}';
const isRc = ${{ steps.tag.outputs.is_rc }};
const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
const makeLatest = outdated ? false : 'legacy';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
let rel = releases.data.find(r => r.tag_name === tag);
let rel = releases.data.find(r => r.tag_name === tag);
if (!rel) {
rel = await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: tag,
name: tag,
draft: true,
prerelease: isRc // no make_latest for drafts
tag_name: tag,
name: tag,
draft: true,
prerelease: isRc,
make_latest: makeLatest
});
console.log(`Draft release created for ${tag}`);
} else {
console.log(`Re-using existing release ${tag}`);
}
core.setOutput('upload_url', rel.upload_url);
# Build + upload assets (optional)

.gitignore vendored
View File

@@ -77,5 +77,3 @@ fabric.properties
.DS_Store
**/.DS_Store
tmp/

View File

@@ -11,14 +11,14 @@ repos:
- id: run-make-generate
name: Run 'make generate' in all app directories
entry: |
flock -x .git/pre-commit.lock sh -c '
for dir in ./packages/apps/*/ ./packages/extra/*/ ./packages/system/cozystack-api/; do
/bin/bash -c '
for dir in ./packages/apps/*/; do
if [ -d "$dir" ]; then
echo "Running make generate in $dir"
make generate -C "$dir" || exit $?
(cd "$dir" && make generate)
fi
done
git diff --color=always | cat
'
language: system
language: script
files: ^.*$

View File

@@ -30,5 +30,3 @@ This list is sorted in chronological order, based on the submission date.
| [Bootstack](https://bootstack.app/) | @mrkhachaturov | 2024-08-01| At Bootstack, we utilize a Kubernetes operator specifically designed to simplify and streamline cloud infrastructure creation.|
| [gohost](https://gohost.kz/) | @karabass_off | 2024-02-01 | Our company has been working in the market of Kazakhstan for more than 15 years, providing clients with a standard set of services: VPS/VDC, IaaS, shared hosting, etc. Now we are expanding the lineup by introducing a Bare Metal Kubernetes cluster under Cozystack management. |
| [Urmanac](https://urmanac.com) | @kingdonb | 2024-12-04 | Urmanac is the future home of a hosting platform for the knowledge base of a community of personal server enthusiasts. We use Cozystack to provide support services for web sites hosted using both conventional deployments and on SpinKube, with WASM. |
| [Hidora](https://hikube.cloud) | @matthieu-robin | 2025-09-17 | Hidora is a Swiss cloud provider delivering managed services and infrastructure solutions through datacenters located in Switzerland, ensuring data sovereignty and reliability. Its sovereign cloud platform, Hikube, is designed to run workloads with high availability across multiple datacenters, providing enterprises with a secure and scalable foundation for their applications based on Cozystack. |

View File

@@ -18,12 +18,10 @@ build: build-deps
make -C packages/system/cilium image
make -C packages/system/kubeovn image
make -C packages/system/kubeovn-webhook image
make -C packages/system/kubeovn-plunger image
make -C packages/system/dashboard image
make -C packages/system/metallb image
make -C packages/system/kamaji image
make -C packages/system/bucket image
make -C packages/system/objectstorage-controller image
make -C packages/core/testing image
make -C packages/core/installer image
make manifests

View File

@@ -1,89 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// CozystackResourceDefinition is the Schema for the cozystackresourcedefinitions API
type CozystackResourceDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CozystackResourceDefinitionSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// CozystackResourceDefinitionList contains a list of CozystackResourceDefinition
type CozystackResourceDefinitionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CozystackResourceDefinition `json:"items"`
}
func init() {
SchemeBuilder.Register(&CozystackResourceDefinition{}, &CozystackResourceDefinitionList{})
}
type CozystackResourceDefinitionSpec struct {
// Application configuration
Application CozystackResourceDefinitionApplication `json:"application"`
// Release configuration
Release CozystackResourceDefinitionRelease `json:"release"`
}
type CozystackResourceDefinitionChart struct {
// Name of the Helm chart
Name string `json:"name"`
// Source reference for the Helm chart
SourceRef SourceRef `json:"sourceRef"`
}
type SourceRef struct {
// Kind of the source reference
// +kubebuilder:default:="HelmRepository"
Kind string `json:"kind"`
// Name of the source reference
Name string `json:"name"`
// Namespace of the source reference
// +kubebuilder:default:="cozy-public"
Namespace string `json:"namespace"`
}
type CozystackResourceDefinitionApplication struct {
// Kind of the application, used for UI and API
Kind string `json:"kind"`
// OpenAPI schema for the application, used for API validation
OpenAPISchema string `json:"openAPISchema"`
// Plural name of the application, used for UI and API
Plural string `json:"plural"`
// Singular name of the application, used for UI and API
Singular string `json:"singular"`
}
type CozystackResourceDefinitionRelease struct {
// Helm chart configuration
Chart CozystackResourceDefinitionChart `json:"chart"`
// Labels for the release
Labels map[string]string `json:"labels,omitempty"`
// Prefix for the release name
Prefix string `json:"prefix"`
}

View File

@@ -25,135 +25,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinition) DeepCopyInto(out *CozystackResourceDefinition) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinition.
func (in *CozystackResourceDefinition) DeepCopy() *CozystackResourceDefinition {
if in == nil {
return nil
}
out := new(CozystackResourceDefinition)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CozystackResourceDefinition) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionApplication) DeepCopyInto(out *CozystackResourceDefinitionApplication) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionApplication.
func (in *CozystackResourceDefinitionApplication) DeepCopy() *CozystackResourceDefinitionApplication {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionChart) DeepCopyInto(out *CozystackResourceDefinitionChart) {
*out = *in
out.SourceRef = in.SourceRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionChart.
func (in *CozystackResourceDefinitionChart) DeepCopy() *CozystackResourceDefinitionChart {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionChart)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionList) DeepCopyInto(out *CozystackResourceDefinitionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CozystackResourceDefinition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionList.
func (in *CozystackResourceDefinitionList) DeepCopy() *CozystackResourceDefinitionList {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CozystackResourceDefinitionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionRelease) DeepCopyInto(out *CozystackResourceDefinitionRelease) {
*out = *in
out.Chart = in.Chart
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionRelease.
func (in *CozystackResourceDefinitionRelease) DeepCopy() *CozystackResourceDefinitionRelease {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionRelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionSpec) DeepCopyInto(out *CozystackResourceDefinitionSpec) {
*out = *in
out.Application = in.Application
in.Release.DeepCopyInto(&out.Release)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionSpec.
func (in *CozystackResourceDefinitionSpec) DeepCopy() *CozystackResourceDefinitionSpec {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Selector) DeepCopyInto(out *Selector) {
{
@@ -175,21 +46,6 @@ func (in Selector) DeepCopy() Selector {
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourceRef) DeepCopyInto(out *SourceRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRef.
func (in *SourceRef) DeepCopy() *SourceRef {
if in == nil {
return nil
}
out := new(SourceRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Workload) DeepCopyInto(out *Workload) {
*out = *in

View File

@@ -206,14 +206,6 @@ func main() {
os.Exit(1)
}
if err = (&controller.CozystackResourceDefinitionReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozystackResourceDefinitionReconciler")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {

View File

@@ -1,176 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"flag"
"os"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"github.com/cozystack/cozystack/internal/controller/kubeovnplunger"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var kubeOVNNamespace string
var ovnCentralName string
var secureMetrics bool
var enableHTTP2 bool
var disableTelemetry bool
var tlsOpts []func(*tls.Config)
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.StringVar(&kubeOVNNamespace, "kube-ovn-namespace", "cozy-kubeovn", "Namespace where kube-OVN is deployed.")
flag.StringVar(&ovnCentralName, "ovn-central-name", "ovn-central", "Ovn-central deployment name.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&secureMetrics, "metrics-secure", true,
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
"Disable telemetry collection")
opts := zap.Options{
Development: false,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
// if the enable-http2 flag is false (the default), http/2 should be disabled
// due to its vulnerabilities. More specifically, disabling http/2 will
// prevent from being vulnerable to the HTTP/2 Stream Cancellation and
// Rapid Reset CVEs. For more information see:
// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
// - https://github.com/advisories/GHSA-4374-p667-p6c8
disableHTTP2 := func(c *tls.Config) {
setupLog.Info("disabling http/2")
c.NextProtos = []string{"http/1.1"}
}
if !enableHTTP2 {
tlsOpts = append(tlsOpts, disableHTTP2)
}
webhookServer := webhook.NewServer(webhook.Options{
TLSOpts: tlsOpts,
})
// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
// More info:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
// - https://book.kubebuilder.io/reference/metrics.html
metricsServerOptions := metricsserver.Options{
BindAddress: metricsAddr,
SecureServing: secureMetrics,
TLSOpts: tlsOpts,
}
if secureMetrics {
// FilterProvider is used to protect the metrics endpoint with authn/authz.
// These configurations ensure that only authorized users and service accounts
// can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
// TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
// generate self-signed certificates for the metrics server. While convenient for development and testing,
// this setup is not recommended for production.
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Metrics: metricsServerOptions,
WebhookServer: webhookServer,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "29a0338b.cozystack.io",
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
// speeds up voluntary leader transitions as the new leader don't have to wait
// LeaseDuration time first.
//
// In the default scaffold provided, the program ends immediately after
// the manager stops, so would be fine to enable this option. However,
// if you are doing or is intended to do any operation such as perform cleanups
// after the manager stops then its usage might be unsafe.
// LeaderElectionReleaseOnCancel: true,
})
if err != nil {
setupLog.Error(err, "unable to create manager")
os.Exit(1)
}
if err = (&kubeovnplunger.KubeOVNPlunger{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Registry: metrics.Registry,
}).SetupWithManager(mgr, kubeOVNNamespace, ovnCentralName); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "KubeOVNPlunger")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

File diff suppressed because it is too large.

View File

@@ -1,20 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0..
-->
## Major Features and Improvements
## Security
## Fixes
## Dependencies
## Documentation
## Development, Testing, and CI/CD
---
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.0...v0.35.0

View File

@@ -129,7 +129,7 @@ For more information, read the [Cozystack Release Workflow](https://github.com/c
* [platform] Reduce requested CPU and RAM for the `kamaji` provider. (@klinch0 in https://github.com/cozystack/cozystack/pull/825)
* [platform] Improve the reconciliation loop for the Cozystack system HelmReleases logic. (@klinch0 in https://github.com/cozystack/cozystack/pull/809 and https://github.com/cozystack/cozystack/pull/810, @kvaps in https://github.com/cozystack/cozystack/pull/811)
* [platform] Remove extra dependencies for the Piraeus operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/856)
* [platform] Refactor dashboard values. (@kvaps in https://github.com/cozystack/cozystack/pull/928, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/952)
* [platform] Refactor dashboard values. (@kvaps in https://github.com/cozystack/cozystack/pull/928, patched by @llamnyp in https://github.com/cozystack/cozystack/pull/952)
* [platform] Make FluxCD artifact disabled by default. (@klinch0 in https://github.com/cozystack/cozystack/pull/964)
* [kubernetes] Update garbage collection of HelmReleases in tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/835)
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)

View File

@@ -1,8 +0,0 @@
## Fixes
* [build] Update Talos Linux to v1.10.3 and fix assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
* [ci] Fix uploading released artifacts to GitHub. (@kvaps in https://github.com/cozystack/cozystack/pull/1009)
* [ci] Separate build and testing jobs. (@kvaps in https://github.com/cozystack/cozystack/pull/1005)
* [docs] Write a full release post for v0.31.1. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/999)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.31.1

View File

@@ -1,12 +0,0 @@
## Security
* Resolve a security problem that allowed a tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062, backported in https://github.com/cozystack/cozystack/pull/1066)
## Fixes
* [platform] Fix dependencies in `distro-full` bundle. (@klinch0 in https://github.com/cozystack/cozystack/pull/1056, backported in https://github.com/cozystack/cozystack/pull/1064)
* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031, backported in https://github.com/cozystack/cozystack/pull/1037)
* [platform] Reduce system resource consumption by using smaller resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054, backported in https://github.com/cozystack/cozystack/pull/1058)
* [dashboard] Fix a number of issues in the Cozystack Dashboard (@kvaps in https://github.com/cozystack/cozystack/pull/1042, backported in https://github.com/cozystack/cozystack/pull/1066)
* [apps] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040, backported in https://github.com/cozystack/cozystack/pull/1041)
* [apps] Update built-in documentation and configuration reference for managed Clickhouse application. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1059, backported in https://github.com/cozystack/cozystack/pull/1065)

View File

@@ -1,38 +0,0 @@
## Major Features and Improvements
* [postgres] Introduce new functionality for backup and restore in PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/1086)
* [apps] Refactor resources in managed applications. (@kvaps in https://github.com/cozystack/cozystack/pull/1106)
* [system] Make VMAgent's `extraArgs` tunable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1091)
## Fixes
* [postgres] Escape users and database names. (@kvaps in https://github.com/cozystack/cozystack/pull/1087)
* [tenant] Fix monitoring agents HelmReleases for tenant clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/1079)
* [kubernetes] Wrap cert-manager CRDs in a conditional. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1076)
* [kubernetes] Remove `useCustomSecretForPatchContainerd` option and enable it by default. (@kvaps in https://github.com/cozystack/cozystack/pull/1104)
* [apps] Increase default resource presets for ClickHouse and Kafka from `nano` to `small`. Update OpenAPI specs and READMEs. (@kvaps in https://github.com/cozystack/cozystack/pull/1103 and https://github.com/cozystack/cozystack/pull/1105)
* [linstor] Add configurable DRBD network options for connection and timeout settings, replacing scripted logic for detecting devices that lost connection. (@kvaps in https://github.com/cozystack/cozystack/pull/1094)
## Dependencies
* Update cozy-proxy to v0.2.0 (@kvaps in https://github.com/cozystack/cozystack/pull/1081)
* Update Kafka Operator to 0.45.1-rc1 (@kvaps in https://github.com/cozystack/cozystack/pull/1082 and https://github.com/cozystack/cozystack/pull/1102)
* Update Flux Operator to 0.23.0 (@kingdonb in https://github.com/cozystack/cozystack/pull/1078)
## Documentation
* [docs] Release notes for v0.32.0 and two beta-versions. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1043)
## Development, Testing, and CI/CD
* [tests] Add tests for Kafka and Redis. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1077)
* [tests] Increase disk space for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1097)
* [tests] Update to Kubernetes v1.33. (@kvaps in https://github.com/cozystack/cozystack/pull/1083)
* [tests] Increase PostgreSQL timeouts. (@kvaps in https://github.com/cozystack/cozystack/pull/1108)
* [tests] Don't wait for the PostgreSQL read-only service. (@kvaps in https://github.com/cozystack/cozystack/pull/1109)
* [ci] Setup systemd timer to tear down sandbox. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1092)
* [ci] Split testing job into several. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1075)
* [ci] Run E2E tests as separate parallel jobs. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1093)
* [ci] Refactor GitHub workflows. (@kvaps in https://github.com/cozystack/cozystack/pull/1107)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.32.0...v0.32.1

View File

@@ -1,91 +0,0 @@
> [!WARNING]
> A patch release [0.33.2](https://github.com/cozystack/cozystack/releases/tag/v0.33.2) fixing a regression in 0.33.0 has been released.
> It is recommended to skip this version and upgrade to [0.33.2](https://github.com/cozystack/cozystack/releases/tag/v0.33.2) instead.
## Feature Highlights
### Unified CPU and Memory Allocation Management
Version 0.31.0 introduced a single-point-of-truth configuration variable, `cpu-allocation-ratio`,
making CPU resource requests and limits uniform in virtual machines managed by KubeVirt.
The new release 0.33.0 introduces `memory-allocation-ratio` and expands both variables to all managed applications and tenant resource quotas.
Resource presets also respect the allocation ratios and behave in the same way as explicit resource definitions.
The new resource definition format is concise and simple for platform users.
```yaml
# resource definition in the configuration
resources:
cpu: <defined cpu value>
memory: <defined memory value>
```
It results in Kubernetes resource requests and limits, based on defined values and the universal allocation ratios:
```yaml
# actual requests and limits, provided to the application
resources:
limits:
cpu: <defined cpu value>
memory: <defined memory value>
requests:
cpu: <defined cpu value / cpu-allocation-ratio>
memory: <defined memory value / memory-allocation-ratio>
```
When updating from earlier Cozystack versions, resource configuration in managed applications will be automatically migrated to the new format.
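To make the arithmetic concrete, here is a sketch with illustrative ratios (`cpu-allocation-ratio: 4`, `memory-allocation-ratio: 2`; both numbers are assumptions for the example, not platform defaults):
```yaml
# Resource definition as written by the user (illustrative values)
resources:
  cpu: 2          # 2 CPU cores
  memory: 4Gi
```
```yaml
# Resulting requests and limits with cpu-allocation-ratio=4
# and memory-allocation-ratio=2
resources:
  limits:
    cpu: 2        # limit equals the defined value
    memory: 4Gi
  requests:
    cpu: 500m     # 2 cores / 4 = 0.5 core
    memory: 2Gi   # 4Gi / 2 = 2Gi
```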
### Backing up and Restoring Data in Tenant Kubernetes
One of the main features of the release is backup capability for PVCs in tenant Kubernetes clusters.
It enables platform and tenant administrators to back up and restore data used by services in the tenant clusters.
This new functionality in Cozystack is powered by [Velero](https://velero.io/) and needs an external S3-compatible storage.
### Support for NFS Storage
Cozystack now supports using NFS shared storage with a new optional system module.
See the documentation: https://cozystack.io/docs/operations/storage/nfs/.
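As a sketch of how an optional module is typically switched on, assuming the `bundle-enable` key in the `cozystack` ConfigMap (the key name follows the pattern used for other optional components; treat the snippet as illustrative):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  bundle-name: "paas-full"
  # Comma-separated list of optional modules to enable (illustrative)
  bundle-enable: "nfs-driver"
```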
## Features and Improvements
* [kubernetes] Enable PVC backups in tenant Kubernetes clusters, powered by [Velero](https://velero.io/). (@klinch0 in https://github.com/cozystack/cozystack/pull/1132)
* [nfs-driver] Enable NFS support by introducing a new optional system module `nfs-driver`. (@kvaps in https://github.com/cozystack/cozystack/pull/1133)
* [virtual-machine] Configure CPU sockets available to VMs with the `resources.cpu.sockets` configuration value. (@klinch0 in https://github.com/cozystack/cozystack/pull/1131)
* [virtual-machine] Add support for using pre-imported "golden image" disks for virtual machines, enabling faster provisioning by referencing existing images instead of downloading via HTTP. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1112)
* [kubernetes] Add an option to expose the Ingress-NGINX controller in tenant Kubernetes clusters via LoadBalancer. The new configuration value `exposeMethod` offers a choice of `Proxied` and `LoadBalancer`; a values sketch follows this list. (@kvaps in https://github.com/cozystack/cozystack/pull/1114)
* [apps] When updating from earlier Cozystack versions, automatically migrate to the new resource definition format: from `resources.requests.[cpu,memory]` and `resources.limits.[cpu,memory]` to `resources.[cpu,memory]`. (@kvaps in https://github.com/cozystack/cozystack/pull/1127)
* [apps] Give examples of new resource definitions in the managed app READMEs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1120)
* [tenant] Respect `cpu-allocation-ratio` in the tenant's `resourceQuotas`. (@kvaps in https://github.com/cozystack/cozystack/pull/1119)
* [cozy-lib] Introduce helper function to calculate Java heap params based on memory requests and limits. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1157)
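As a sketch of the `exposeMethod` option mentioned above, assuming it lives under the tenant Kubernetes chart's `addons.ingressNginx` section (the nesting is an assumption; the option and its two values come from the release note):
```yaml
addons:
  ingressNginx:
    enabled: true
    # Proxied routes traffic through the parent cluster;
    # LoadBalancer requests a dedicated LoadBalancer service.
    exposeMethod: LoadBalancer
```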
## Security
* [monitoring] Disable sign up in Alerta. (@klinch0 in https://github.com/cozystack/cozystack/pull/1129)
## Fixes
* [platform] Always set resources for managed apps. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1156)
* [platform] Remove the memory limit for Keycloak deployment. (@klinch0 in https://github.com/cozystack/cozystack/pull/1122)
* [kubernetes] Fix a condition in the ingress template for tenant Kubernetes. (@kvaps in https://github.com/cozystack/cozystack/pull/1143)
* [kubernetes] Fix a deadlock on reattaching a KubeVirt-CSI volume. (@kvaps in https://github.com/cozystack/cozystack/pull/1135)
* [mysql] MySQL applications with a single replica now correctly create a `LoadBalancer` service. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1113)
* [etcd] Fix resources and headless services in the etcd application. (@kvaps in https://github.com/cozystack/cozystack/pull/1128)
* [apps] Enable selecting `resourcePreset` from a drop-down list for all applications by adding enum of allowed values in the config scheme. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1117)
* [apps] Refactor resource presets provided to managed apps by `cozy-lib`. (@kvaps in https://github.com/cozystack/cozystack/pull/1155)
* [keycloak] Calculate and pass Java heap parameters explicitly to prevent OOM errors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1157)
## Development, Testing, and CI/CD
* [dx] Introduce cozyreport tool and gather reports in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1139)
* [ci] Use Nexus as a pull-through cache for CI. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1124)
* [ci] Save a list of observed images after each workflow run. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1089)
* [ci] Skip Cozystack tests on PRs that only change the docs. Don't restart CI when a PR is labeled. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1136)
* [dx] Fix Makefile variables for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1115)
* [tests] Introduce self-destructing testing environments. (@kvaps in https://github.com/cozystack/cozystack/pull/1138, https://github.com/cozystack/cozystack/pull/1140, https://github.com/cozystack/cozystack/pull/1141, https://github.com/cozystack/cozystack/pull/1142)
* [e2e] Retry flaky application tests to improve total test time. (@kvaps in https://github.com/cozystack/cozystack/pull/1123)
* [maintenance] Add a PR template. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1121)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.32.1...v0.33.0

View File

@@ -1,3 +0,0 @@
## Fixes
* [kubevirt-csi] Fix a regression by updating the role of the CSI controller. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1165)

View File

@@ -1,19 +0,0 @@
## Features and Improvements
* [vm-instance] Enable running [Windows](https://cozystack.io/docs/operations/virtualization/windows/) and [MikroTik RouterOS](https://cozystack.io/docs/operations/virtualization/mikrotik/) in Cozystack. Add `bus` option and always specify `bootOrder` for all disks. (@kvaps in https://github.com/cozystack/cozystack/pull/1168)
* [cozystack-api] Refactor OpenAPI Schema and support reading it from config. (@kvaps in https://github.com/cozystack/cozystack/pull/1173)
* [cozystack-api] Enable using singular resource names in Cozystack API. For example, `kubectl get tenant` is now a valid command, in addition to `kubectl get tenants`. (@kvaps in https://github.com/cozystack/cozystack/pull/1169)
* [postgres] Explain how to back up and restore PostgreSQL using Velero backups. (@klinch0 and @NickVolynkin in https://github.com/cozystack/cozystack/pull/1141)
## Fixes
* [virtual-machine,vm-instance] Adjusted RBAC role to let users read the service associated with the VMs they create. Consequently, users can now see details of the service in the dashboard and therefore read the IP address of the VM. (@klinch0 in https://github.com/cozystack/cozystack/pull/1161)
* [cozystack-api] Fix an error with `resourceVersion` which resulted in message 'failed to update HelmRelease: helmreleases.helm.toolkit.fluxcd.io "xxx" is invalid...'. (@kvaps in https://github.com/cozystack/cozystack/pull/1170)
* [cozystack-api] Fix an error in updating lists in Cozystack objects, which resulted in message "Warning: resource ... is missing the kubectl.kubernetes.io/last-applied-configuration annotation". (@kvaps in https://github.com/cozystack/cozystack/pull/1171)
* [cozystack-api] Disable `strategic-json-patch` support. (@kvaps in https://github.com/cozystack/cozystack/pull/1179)
* [dashboard] Fix the code for removing dashboard comments, which used to mistakenly remove the shebang from cloudInit scripts. (@kvaps in https://github.com/cozystack/cozystack/pull/1175)
* [virtual-machine] Fix cloudInit and sshKeys processing. (@kvaps in https://github.com/cozystack/cozystack/pull/1175 and https://github.com/cozystack/cozystack/commit/da3ee5d0ea9e87529c8adc4fcccffabe8782292e)
* [applications] Fix a typo in preset resource tables in the built-in documentation of managed applications. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1172)
* [kubernetes] Enable deleting Velero component from a tenant Kubernetes cluster. (@klinch0 in https://github.com/cozystack/cozystack/pull/1176)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.33.1...v0.33.2

View File

@@ -1,87 +0,0 @@
Cozystack v0.34.0 is a stable release.
It focuses on cluster reliability, virtualization capabilities, and enhancements to the Cozystack API.
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.0
-->
> [!WARNING]
> A regression was found in this release and fixed in patch [0.34.3](https://github.com/cozystack/cozystack/releases/tag/v0.34.3).
> When upgrading Cozystack, it's recommended to skip this version and upgrade directly to [0.34.3](https://github.com/cozystack/cozystack/releases/tag/v0.34.3).
## Major Features and Improvements
* [kubernetes] Enable users to select Kubernetes versions in tenant clusters. Supported versions range from 1.28 to 1.33, updated to the latest patches. (@lllamnyp and @IvanHunters in https://github.com/cozystack/cozystack/pull/1202)
* [kubernetes] Enable PVC snapshot capability in tenant Kubernetes clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/1203)
* [vpa] Implement autoscaling for the Vertical Pod Autoscaler itself, ensuring that VPA has sufficient resources and reducing the number of configuration parameters that platform administrators have to manage. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1198)
* [vm-instance] Enable running [Windows](https://cozystack.io/docs/operations/virtualization/windows/) and [MikroTik RouterOS](https://cozystack.io/docs/operations/virtualization/mikrotik/) in Cozystack. Add `bus` option and always specify `bootOrder` for all disks. (@kvaps in https://github.com/cozystack/cozystack/pull/1168)
* [cozystack-api] Specify OpenAPI schema for apps. (@kvaps in https://github.com/cozystack/cozystack/pull/1174)
* [cozystack-api] Refactor OpenAPI Schema and support reading it from config. (@kvaps in https://github.com/cozystack/cozystack/pull/1173)
* [cozystack-api] Enable using singular resource names in Cozystack API. For example, `kubectl get tenant` is now a valid command, in addition to `kubectl get tenants`. (@kvaps in https://github.com/cozystack/cozystack/pull/1169)
* [postgres] Explain how to back up and restore PostgreSQL using Velero backups. (@klinch0 and @NickVolynkin in https://github.com/cozystack/cozystack/pull/1141)
* [seaweedfs] Support multi-zone configuration for S3 storage. (@kvaps in https://github.com/cozystack/cozystack/pull/1194)
* [dashboard] Put YAML editor first when deploying and upgrading applications, as a more powerful option. Fix handling multiline strings. (@kvaps in https://github.com/cozystack/cozystack/pull/1227)
## Security
* [seaweedfs] Ensure that JWT signing keys in the SeaweedFS security configuration remain consistent across Helm upgrades. Resolve an upstream issue. (@kvaps in https://github.com/cozystack/cozystack/pull/1193 and https://github.com/seaweedfs/seaweedfs/pull/6967)
## Fixes
* [cozystack-controller] Fix stale workloads not being deleted when marked for deletion. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1210, @kvaps in https://github.com/cozystack/cozystack/pull/1229)
* [cozystack-controller] Improve reliability when updating HelmRelease objects to prevent unintended changes during reconciliation. (@klinch0 in https://github.com/cozystack/cozystack/pull/1205)
* [kubevirt-csi] Fix a regression by updating the role of the CSI controller. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1165)
* [virtual-machine,vm-instance] Adjusted RBAC role to let users read the service associated with the VMs they create. Consequently, users can now see details of the service in the dashboard and therefore read the IP address of the VM. (@klinch0 in https://github.com/cozystack/cozystack/pull/1161)
* [virtual-machine] Fix cloudInit and sshKeys processing. (@kvaps in https://github.com/cozystack/cozystack/pull/1175 and https://github.com/cozystack/cozystack/commit/da3ee5d0ea9e87529c8adc4fcccffabe8782292e)
* [cozystack-api] Fix an error with `resourceVersion` which resulted in message 'failed to update HelmRelease: helmreleases.helm.toolkit.fluxcd.io "xxx" is invalid...'. (@kvaps in https://github.com/cozystack/cozystack/pull/1170)
* [cozystack-api] Fix an error in updating lists in Cozystack objects, which resulted in message "Warning: resource ... is missing the kubectl.kubernetes.io/last-applied-configuration annotation". (@kvaps in https://github.com/cozystack/cozystack/pull/1171)
* [cozystack-api] Disable `strategic-json-patch` support. (@kvaps in https://github.com/cozystack/cozystack/pull/1179)
* [cozystack-api] Fix non-existing OpenAPI references. (@kvaps in https://github.com/cozystack/cozystack/pull/1208)
* [dashboard] Fix the code for removing dashboard comments, which used to mistakenly remove the shebang from `cloudInit` scripts. (@kvaps in https://github.com/cozystack/cozystack/pull/1175)
* [applications] Reorder configuration values in application READMEs for better readability. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1214)
* [applications] Disallow selecting `resourcePreset = none` in the visual editor when deploying and upgrading applications. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1196)
* [applications] Fix a typo in preset resource tables in the built-in documentation of managed applications. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1172)
* [kubernetes] Enable deleting Velero component from a tenant Kubernetes cluster. (@klinch0 in https://github.com/cozystack/cozystack/pull/1176)
* [kubernetes] Explicitly mention available K8s versions for tenant clusters in the README. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1212)
* [oidc] Enable deleting Keycloak service. (@klinch0 in https://github.com/cozystack/cozystack/pull/1178)
* [tenant] Enable deleting extra applications from a tenant. (@klinch0 and @kvaps in https://github.com/cozystack/cozystack/pull/1162)
* [nats] Fix a typo in the application template. (@klinch0 in https://github.com/cozystack/cozystack/pull/1195)
* [postgres] Resolve an issue with the visibility of the PostgreSQL load balancer on the dashboard. (@klinch0 in https://github.com/cozystack/cozystack/pull/1204)
* [objectstorage] Update COSI controller and sidecar, including fixes from upstream. (@kvaps in https://github.com/cozystack/cozystack/pull/1209, https://github.com/kubernetes-sigs/container-object-storage-interface/pull/89, and https://github.com/kubernetes-sigs/container-object-storage-interface/pull/90)
## Dependencies
* Update FerretDB from v1 to v2.4.0.<br>**Breaking change:** before upgrading FerretDB instances, back up and restore the data following the [migration guide](https://docs.ferretdb.io/migration/migrating-from-v1/). (@kvaps in https://github.com/cozystack/cozystack/pull/1206)
* Update Talos Linux to v1.10.5. (@kvaps in https://github.com/cozystack/cozystack/pull/1186)
* Update LINSTOR to v1.31.2. (@kvaps in https://github.com/cozystack/cozystack/pull/1180)
* Update KubeVirt to v1.5.2. (@kvaps in https://github.com/cozystack/cozystack/pull/1183)
* Update CDI to v1.62.0. (@kvaps in https://github.com/cozystack/cozystack/pull/1183)
* Update Flux Operator to 0.24.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/1167)
* Update Kamaji to edge-25.7.1. (@kvaps in https://github.com/cozystack/cozystack/pull/1184)
* Update Kube-OVN to v1.13.14. (@kvaps in https://github.com/cozystack/cozystack/pull/1182)
* Update Cilium to v1.17.5. (@kvaps in https://github.com/cozystack/cozystack/pull/1181)
* Update MariaDB Operator to v0.38.1. (@kvaps in https://github.com/cozystack/cozystack/pull/1188)
* Update SeaweedFS to v3.94. (@kvaps in https://github.com/cozystack/cozystack/pull/1194)
## Documentation
* [Updated Cozystack Roadmap and Backlog for 2024-2026](https://cozystack.io/docs/roadmap/). (@tym83 and @kvapsova in https://github.com/cozystack/website/pull/249)
* [Running Windows VMs](https://cozystack.io/docs/operations/virtualization/windows/). (@kvaps and @NickVolynkin in https://github.com/cozystack/website/pull/246)
* [Running MikroTik RouterOS VMs](https://cozystack.io/docs/operations/virtualization/mikrotik/). (@kvaps and @NickVolynkin in https://github.com/cozystack/website/pull/247)
* [Public-network Kubernetes Deployment](https://cozystack.io/docs/operations/faq/#public-network-kubernetes-deployment). (@klinch0 and @NickVolynkin in https://github.com/cozystack/website/pull/242)
* [How to allocate space on system disk for user storage](https://cozystack.io/docs/operations/faq/#how-to-allocate-space-on-system-disk-for-user-storage). (@klinch0 and @NickVolynkin in https://github.com/cozystack/website/pull/242)
* [Resource Management in Cozystack](https://cozystack.io/docs/guides/resource-management/). (@NickVolynkin in https://github.com/cozystack/website/pull/233)
* [Key Concepts of Cozystack](https://cozystack.io/docs/guides/concepts/). (@NickVolynkin in https://github.com/cozystack/website/pull/254)
* [Cozystack Architecture and Platform Stack](https://cozystack.io/docs/guides/platform-stack/). (@NickVolynkin in https://github.com/cozystack/website/pull/252)
* Fixed a parameter in Kubespan: `cluster.discovery.enabled = true`. (@lb0o in https://github.com/cozystack/website/pull/241)
* Updated the Linux Foundation trademark text on the Cozystack website. (@krook in https://github.com/cozystack/website/pull/251)
* Auto-update the managed applications reference pages. (@NickVolynkin in https://github.com/cozystack/website/pull/243 and https://github.com/cozystack/website/pull/245)
## Development, Testing, and CI/CD
* [ci] Improve workflow for contributors submitting PRs from forks. Use Oracle Cloud Infrastructure Registry for non-release PRs, bypassing restrictions preventing pushing to ghcr.io with default GitHub token. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1226)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.33.0...v0.34.0

View File

@@ -1,15 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.1
-->
> [!WARNING]
> A regression was found in this release and fixed in patch [0.34.3](https://github.com/cozystack/cozystack/releases/tag/v0.34.3).
> When upgrading Cozystack, it's recommended to skip this version and upgrade directly to [0.34.3](https://github.com/cozystack/cozystack/releases/tag/v0.34.3).
## Fixes
* [kubernetes] Fix regression in `volumesnapshotclass` installation from https://github.com/cozystack/cozystack/pull/1203. (@kvaps in https://github.com/cozystack/cozystack/pull/1238)
* [objectstorage] Fix building objectstorage images. (@kvaps in https://github.com/cozystack/cozystack/commit/a9e9dfca1fadde1bf2b4e100753e0731bbcfe923)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.0...v0.34.1

View File

@@ -1,14 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.2
-->
> [!WARNING]
> A regression was found in this release and fixed in patch [0.34.3](https://github.com/cozystack/cozystack/releases/tag/v0.34.3).
> When upgrading Cozystack, it's recommended to skip this version and upgrade directly to [0.34.3](https://github.com/cozystack/cozystack/releases/tag/v0.34.3).
## Fixes
* [objectstorage] Fix recording image in objectstorage. (@kvaps in https://github.com/cozystack/cozystack/commit/4d9a8389d6bc7e86d63dd976ec853b374a91a637)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.1...v0.34.2

View File

@@ -1,13 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.3
-->
## Fixes
* [tenant] Fix tenant network policy to allow traffic to additional tenant-related services across namespace hierarchies. (@klinch0 in https://github.com/cozystack/cozystack/pull/1232, backported in https://github.com/cozystack/cozystack/pull/1272)
* [kubernetes] Add dependency for snapshot CRD and migration to latest version. (@kvaps in https://github.com/cozystack/cozystack/pull/1275, backported in https://github.com/cozystack/cozystack/pull/1279)
* [seaweedfs] Add support for whitelisting and exporting via nginx-ingress. Update cosi-driver. (@kvaps in https://github.com/cozystack/cozystack/pull/1277)
* [kubevirt] Fix building KubeVirt CCM. (@kvaps in 3c7e256906e1dbb0f957dc3a205fa77a147d419d)
* [virtual-machine] Fix a regression with field `optional=true`. (@kvaps in https://github.com/cozystack/cozystack/commit/01053f7c3180d1bd045d7c5fb949984c2bdaf19d)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.2...v0.34.3

View File

@@ -1,21 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.4
-->
## Security
* [keycloak] Store administrative passwords in the management cluster's secrets. (@IvanHunters in https://github.com/cozystack/cozystack/pull/1286)
* [keycloak] Update Keycloak client redirect URI to use HTTPS instead of HTTP. Enable `cookie-secure`. (@klinch0 in https://github.com/cozystack/cozystack/pull/1287, backported in https://github.com/cozystack/cozystack/pull/1291)
## Fixes
* [kubernetes] Resolve problems with pod names exceeding allowed length by shortening the name of volume snapshot CRD from `*-volumesnapshot-crd-for-tenant-k8s` to `*-vsnap-crd`. To apply this change, update each affected tenant Kubernetes cluster after updating Cozystack. (@klinch0 in https://github.com/cozystack/cozystack/pull/1284)
* [cozystack-api] Show correct `kind` values of `ApplicationList`. (@kvaps in https://github.com/cozystack/cozystack/pull/1290, backported in https://github.com/cozystack/cozystack/pull/1293)
## Development, Testing, and CI/CD
* [tests] Add tests for S3 buckets. (@IvanHunters in https://github.com/cozystack/cozystack/pull/1283, backported in https://github.com/cozystack/cozystack/pull/1292)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.3...v0.34.4

View File

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.5
-->
## Fixes
* [virtual-machine] Enable using custom `instanceType` values in `virtual-machine` and `vm-instance` by disabling field validation. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1300, backported in https://github.com/cozystack/cozystack/pull/1303)
* [kubernetes] Disable VPA for VPA in tenant Kubernetes clusters. Tenant clusters have no need for this feature, and it was not designed to work in a tenant cluster, but was enabled by mistake. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1301, backported in https://github.com/cozystack/cozystack/pull/1305)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.4...v0.34.5

View File

@@ -1,9 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.6
-->
## Fixes
* [dashboard] Fix filling multiline values in the visual editor. (@kvaps in https://github.com/cozystack/cozystack/commit/56fca9bd75efeca25f9483f6c514b6fec26d5d22 and https://github.com/cozystack/kubeapps/commit/4926bc68fabb0914afab574006643c85a597b371)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.5...v0.34.6

View File

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.7
-->
## Fixes
* [seaweedfs] Disable proxy buffering and proxy request buffering for ingress. (@kvaps in https://github.com/cozystack/cozystack/pull/1330, backported in https://github.com/cozystack/cozystack/commit/96d462e911d4458704b596533d3f10e4b5e80862)
* [linstor] Update LINSTOR monitoring configuration to use label `controller_node` instead of `node`. (@kvaps in https://github.com/cozystack/cozystack/pull/1326, backported in https://github.com/cozystack/cozystack/pull/1327)
* [kubernetes] Disable VPA for VPA in tenant Kubernetes clusters, porting the fix from v0.34.5. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1318, backported in https://github.com/cozystack/cozystack/pull/1319)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.6...v0.34.7

View File

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.34.8
-->
## Fixes
* [etcd] Fix `topologySpreadConstraints`. (@klinch0 in https://github.com/cozystack/cozystack/pull/1331, backported in https://github.com/cozystack/cozystack/pull/1332)
* [linstor] Update LINSTOR monitoring configuration: switch labels on `linstor-satellite` and `linstor-controller`. (@kvaps in https://github.com/cozystack/cozystack/pull/1335, backported in https://github.com/cozystack/cozystack/pull/1336)
* [kamaji] Fix broken migration jobs originating from missing environment variables in the in-tree build. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1338, backported in https://github.com/cozystack/cozystack/pull/1340)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.7...v0.34.8

View File

@@ -1,138 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.35.0
-->
## Feature Highlights
### External Application Sources in Cozystack
Cozystack now supports adding external application packages to the platform's application catalog.
Platform administrators can include custom or third-party applications alongside built-in ones, using the Cozystack API.
Adding an application requires making an application package, similar to the ones included in Cozystack
under [`packages/apps`](https://github.com/cozystack/cozystack/tree/main/packages/apps).
Using external packages is enabled by a new CustomResourceDefinition (CRD) called `CozystackResourceDefinition` and
a corresponding controller (reconciler) that watches for these resources.
Add your own managed application using the [documentation](https://cozystack.io/docs/applications/external/)
and an example at [github.com/cozystack/external-apps-example](https://github.com/cozystack/external-apps-example).
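A minimal sketch of such a resource, based on the `CozystackResourceDefinition` Go types shown in this diff; the `cozystack.io/v1alpha1` API group and all concrete values are illustrative assumptions, not taken from the example repository:
```yaml
apiVersion: cozystack.io/v1alpha1    # assumed group for the v1alpha1 types
kind: CozystackResourceDefinition
metadata:
  name: example-app                  # hypothetical name
spec:
  application:
    kind: ExampleApp                 # kind exposed through the Cozystack API and UI
    singular: exampleapp
    plural: exampleapps
    openAPISchema: |                 # inline schema used for API validation
      {"type": "object", "properties": {"replicas": {"type": "integer"}}}
  release:
    prefix: example-                 # prefix for generated release names
    labels:
      cozystack.io/ui: "true"        # hypothetical label
    chart:
      name: example-app
      sourceRef:
        kind: HelmRepository         # CRD default
        name: example-repo           # hypothetical repository name
        namespace: cozy-public       # CRD default
```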
<!--
* [platform] Enable using external application packages by adding a `CozystackResourceDefinition` reconciler. Read the documentation on [adding external applications to Cozystack](https://cozystack.io/docs/applications/external/) to learn more. (@klinch0 in https://github.com/cozystack/cozystack/pull/1313)
* [cozystack-api] Provide an API for administrators to define custom managed applications alongside existing managed apps. (@klinch0 in https://github.com/cozystack/cozystack/pull/1230)
-->
### Cozystack API Improvements
This release brings significant improvements to the OpenAPI specs for all managed applications in Cozystack,
including databases, tenant Kubernetes, virtual machines, monitoring, and others.
These changes include more precise type definitions for fields that were previously defined only as generic objects,
and many fields now have value constraints.
Many misconfigurations are now caught immediately, at API request time, rather than surfacing later as a failed deployment.
The Cozystack API now also displays default values for the application resources.
Most other fields now have sane defaults wherever defaults are possible.
All these changes pave the way for the new Cozystack UI, which is currently under development.
### Hetzner RobotLB Support
MetalLB, the default load balancer included in Cozystack, is built for bare metal and self-hosted VMs,
but is not supported on most cloud providers.
For example, Hetzner provides its own RobotLB service, which Cozystack now supports as an optional component.
Read the updated guide on [deploying Cozystack on Hetzner.com](https://cozystack.io/docs/install/providers/hetzner/)
to learn more and deploy your own Cozystack cluster on Hetzner.
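A sketch of enabling it, assuming RobotLB ships as an optional bundle component toggled through the `cozystack` ConfigMap (the component name is a guess; follow the guide linked above):

```bash
kubectl patch configmap/cozystack -n cozy-system --type merge \
  -p '{"data":{"bundle-enable":"robotlb"}}'
```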
### S3 Service: Dedicated Clusters and Monitoring
You can now deploy dedicated Cozystack clusters to run the S3 service, powered by SeaweedFS.
Thanks to the support for [integration with remote filer endpoints](https://cozystack.io/docs/operations/stretched/seaweedfs-multidc/),
you can connect your primary Cozystack cluster to use S3 storage in a dedicated cluster.
For security, platform administrators can now configure the SeaweedFS application with
a list of IP addresses or CIDR ranges that are allowed to access the filer service.
SeaweedFS has also been integrated into the monitoring stack and now has its own Grafana dashboard.
Together, these enhancements help Cozystack users build a more reliable, scalable, and observable S3 service.
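For reference, creating a bucket remains a one-object operation; this mirrors the e2e test added in this release:

```bash
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Bucket
metadata:
  name: test
  namespace: tenant-test
spec: {}
EOF
# Once access is granted, S3 credentials arrive in the bucket-test secret:
kubectl -n tenant-test get secret bucket-test -ojsonpath='{.data.BucketInfo}' | base64 -d
```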
### ClickHouse Keeper
The ClickHouse application now includes a ClickHouse Keeper service to improve cluster reliability and availability.
This component is deployed by default with every ClickHouse cluster.
Learn more in the [ClickHouse configuration reference](https://cozystack.io/docs/applications/clickhouse/#clickhouse-keeper-parameters).
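Keeper is on by default; the sketch below tunes it through the application spec, using the field names from this release's e2e tests (other required fields omitted for brevity):

```bash
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
  name: demo
  namespace: tenant-test
spec:
  clickhouseKeeper:
    enabled: true
    resourcesPreset: "micro"
    size: "1Gi"
EOF
```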
## Major Features and Improvements
* [platform] Enable using external application packages by adding a `CozystackResourceDefinition` reconciler. Read the documentation on [adding external applications to Cozystack](https://cozystack.io/docs/applications/external/) to learn more. (@klinch0 in https://github.com/cozystack/cozystack/pull/1313)
* [cozystack-api, apps] Add default values, clear type definitions, value constraints and other improvements to the OpenAPI specs and READMEs by migrating to [cozyvalue-gen](https://github.com/cozystack/cozyvalues-gen). (@kvaps and @NickVolynkin in https://github.com/cozystack/cozystack/pull/1216, https://github.com/cozystack/cozystack/pull/1314, https://github.com/cozystack/cozystack/pull/1316, https://github.com/cozystack/cozystack/pull/1321, and https://github.com/cozystack/cozystack/pull/1333)
* [cozystack-api] Show default values from the OpenAPI spec in the application resources. (@kvaps in https://github.com/cozystack/cozystack/pull/1241)
* [cozystack-api] Provide an API for administrators to define custom managed applications alongside existing managed apps. (@klinch in https://github.com/cozystack/cozystack/pull/1230)
* [robotlb] Introduce the Hetzner RobotLB balancer. (@IvanHunters and @gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1233)
* [platform, robotlb] Autodetect if node ports should be assigned to load balancer services. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1271)
* [seaweedfs] Enable [integration with remote filer endpoints](https://cozystack.io/docs/operations/stretched/seaweedfs-multidc/) by adding new `Client` topology. (@kvaps in https://github.com/cozystack/cozystack/pull/1239)
* [seaweedfs] Add support for whitelisting and exporting via nginx-ingress. Update cosi-driver. (@kvaps in https://github.com/cozystack/cozystack/pull/1277)
* [monitoring, seaweedfs] Add monitoring and Grafana dashboard for SeaweedFS. (@IvanHunters in https://github.com/cozystack/cozystack/pull/1285)
* [clickhouse] Add the ClickHouse Keeper component. (@klinch0 in https://github.com/cozystack/cozystack/pull/1298 and https://github.com/cozystack/cozystack/pull/1320)
## Security
* [keycloak] Store administrative passwords in the management cluster's secrets. (@IvanHunters in https://github.com/cozystack/cozystack/pull/1286)
* [keycloak] Update Keycloak client redirect URI to use HTTPS instead of HTTP. Enable `cookie-secure`. (@klinch0 in https://github.com/cozystack/cozystack/pull/1287)
## Fixes
* [platform] Introduce a fixed 2-second delay at the start of reconciliation for system and tenant Helm operations. (@klinch0 in https://github.com/cozystack/cozystack/pull/1343)
* [kubernetes] Add dependency for snapshot CRD and migration to the latest version. (@kvaps in https://github.com/cozystack/cozystack/pull/1275)
* [kubernetes] Fix regression in `volumesnapshotclass` installation from https://github.com/cozystack/cozystack/pull/1203. (@kvaps in https://github.com/cozystack/cozystack/pull/1238)
* [kubernetes] Resolve problems with pod names exceeding allowed length by shortening the name of volume snapshot CRD from `*-volumesnapshot-crd-for-tenant-k8s` to `*-vsnap-crd`. To apply this change, update each affected tenant Kubernetes cluster after updating Cozystack. (@klinch0 in https://github.com/cozystack/cozystack/pull/1284)
* [kubernetes] Disable VPA for VPA in tenant Kubernetes clusters. Tenant clusters have no need for this feature, and it was not designed to work in a tenant cluster, but was enabled by mistake. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1301 and https://github.com/cozystack/cozystack/pull/1318)
* [kamaji] Fix broken migration jobs originating from missing environment variables in the in-tree build. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1338)
* [etcd] Fix the `topologySpreadConstraints` for etcd. (@klinch0 in https://github.com/cozystack/cozystack/pull/1331)
* [tenant] Fix tenant network policy to allow traffic to additional tenant-related services across namespace hierarchies. (@klinch0 in https://github.com/cozystack/cozystack/pull/1232)
* [tenant, monitoring] Improve the reliability of tenant monitoring by increasing the timeout and number of retries. (@IvanHunters in https://github.com/cozystack/cozystack/pull/1294)
* [kubevirt] Fix building KubeVirt CCM image. (@kvaps in https://github.com/cozystack/cozystack/commit/3c7e256906e1dbb0f957dc3a205fa77a147d419d)
* [virtual-machine] Fix a regression with `optional=true` field. (@kvaps in https://github.com/cozystack/cozystack/commit/01053f7c3180d1bd045d7c5fb949984c2bdaf19d)
* [virtual-machine] Enable using custom `instanceType` values in `virtual-machine` and `vm-instance` by disabling field validation. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1300, backported in https://github.com/cozystack/cozystack/pull/1303)
* [cozystack-api] Show correct `kind` values of `ApplicationList`. (@kvaps in https://github.com/cozystack/cozystack/pull/1290)
* [cozystack-api] Add missing roles to allow cozystack-controller to read Kubernetes deployments. (@klinch0 in https://github.com/cozystack/cozystack/pull/1342)
* [linstor] Update LINSTOR monitoring configuration to use label `controller_node` instead of `node`. (@kvaps in https://github.com/cozystack/cozystack/pull/1326 and https://github.com/cozystack/cozystack/pull/1335)
* [seaweedfs] Fix SeaweedFS volume configuration. Increase the volume size limit from 100MB to 30,000MB. (@kvaps in https://github.com/cozystack/cozystack/pull/1328)
* [seaweedfs] Disable proxy buffering and proxy request buffering for ingress. (@kvaps in https://github.com/cozystack/cozystack/pull/1330)
## Dependencies
* Update flux-operator to 0.28.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/1315 and https://github.com/cozystack/cozystack/pull/1344)
## Documentation
* [Reimplement Cozystack Roadmap as a GitHub project](https://github.com/orgs/cozystack/projects/1). (@cozystack team)
* [SeaweedFS Multi-DC Configuration](https://cozystack.io/docs/operations/stretched/seaweedfs-multidc/). (@kvaps and @NickVolynkin in https://github.com/cozystack/website/pull/272)
* [Troubleshooting Kube-OVN](https://cozystack.io/docs/operations/troubleshooting/#kube-ovn-crash). (@kvaps and @NickVolynkin in https://github.com/cozystack/website/pull/273)
* [Removing failed nodes from Cozystack cluster](https://cozystack.io/docs/operations/troubleshooting/#remove-a-failed-node-from-the-cluster). (@kvaps and @NickVolynkin in https://github.com/cozystack/website/pull/273)
* [Installing Talos with `kexec`](https://cozystack.io/docs/talos/install/kexec/). (@kvaps and @NickVolynkin in https://github.com/cozystack/website/pull/268)
* [Rewrite Cozystack tutorial](https://cozystack.io/docs/getting-started/). (@NickVolynkin in https://github.com/cozystack/website/pull/262 and https://github.com/cozystack/website/pull/268)
* [How to install Cozystack in Hetzner](https://cozystack.io/docs/install/providers/hetzner/). (@NickVolynkin and @IvanHunters in https://github.com/cozystack/website/pull/280)
* [Adding External Applications to Cozystack Catalog](https://cozystack.io/docs/applications/external/). (@klinch0 and @NickVolynkin in https://github.com/cozystack/website/pull/283)
* [Creating and Using Named VM Images (Golden Images)](https://cozystack.io/docs/virtualization/vm-image/) (@NickVolynkin and @kvaps in https://github.com/cozystack/website/pull/276)
* [Creating Encrypted Storage on LINSTOR](https://cozystack.io/docs/operations/storage/disk-encryption/). (@kvaps and @NickVolynkin in https://github.com/cozystack/website/pull/282)
* [Adding and removing components on Cozystack installation using `bundle-enable` and `bundle-disable`](https://cozystack.io/docs/operations/bundles/#how-to-enable-and-disable-bundle-components) (@NickVolynkin in https://github.com/cozystack/website/pull/281)
* Restructure Cozystack documentation. Bring [managed Kubernetes](https://cozystack.io/docs/kubernetes/), [managed applications](https://cozystack.io/docs/applications/), [virtualization](https://cozystack.io/docs/virtualization/), and [networking](https://cozystack.io/docs/networking/) guides to the top level. (@NickVolynkin in https://github.com/cozystack/website/pull/266)
## Development, Testing, and CI/CD
* [tests] Add tests for S3 buckets. (@IvanHunters in https://github.com/cozystack/cozystack/pull/1283)
* [tests, ci] Simplify test discovery logic; run two k8s tests as separate jobs; delete Clickhouse application after a successful test. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1236)
* [dx] When running `make` commands with `BUILDER` value specified, `PLATFORM` is optional. (@kvaps in https://github.com/cozystack/cozystack/pull/1288)
* [tests] Fix resource specification in virtual machine tests. (@IvanHunters in https://github.com/cozystack/cozystack/pull/1308)
* [tests] Increase available space for e2e tests. (@kvaps in https://github.com/cozystack/cozystack/commit/168a24ffdf1202b3bf2e7d2b5ef54b72b7403baf)
* [tests, ci] Continue application tests after one of them fails. (@NickVolynkin in https://github.com/cozystack/cozystack/commit/634b77edad6c32c101f3e5daea6a5ffc0c83d904)
* [ci] Use a subdomain of aenix.org for Nexus service in CI. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1322)
---
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.34.0...v0.35.0

View File

@@ -1,10 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.35.1
-->
## Fixes
* [cozy-lib] Fix malformed retrieval of `cozyConfig` in the cozy-lib template. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1348)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.35.0...v0.35.1

View File

@@ -1,22 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.35.2
-->
## Features and Improvements
* [talos] Add LLDPD (`ghcr.io/siderolabs/lldpd`) as a built-in system extension, enabling LLDP-based neighbor discovery out of the box. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1351 and https://github.com/cozystack/cozystack/pull/1360)
## Fixes
* [cozystack-api] Sanitize the OpenAPI v2 schema. (@kvaps in https://github.com/cozystack/cozystack/pull/1353)
* [seaweedfs] Fix a problem where S3 gateway would be moved to an external pod, resulting in authentication failure. (@kvaps in https://github.com/cozystack/cozystack/pull/1361)
## Dependencies
* Update LINSTOR to v1.31.3. (@kvaps in https://github.com/cozystack/cozystack/pull/1358)
* Update SeaweedFS to v3.96. (@kvaps in https://github.com/cozystack/cozystack/pull/1361)
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.35.1...v0.35.2

3
go.mod
View File

@@ -59,7 +59,6 @@ require (
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
@@ -67,11 +66,9 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/moby/spdystream v0.4.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect

6
go.sum
View File

@@ -2,8 +2,6 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -117,8 +115,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -126,8 +122,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=

View File

@@ -48,7 +48,7 @@ kubectl get ns --no-headers | awk '$2 != "Active"' |
echo "Collecting helmreleases..."
kubectl get hr -A > $REPORT_DIR/kubernetes/helmreleases.txt 2>&1
kubectl get hr -A --no-headers | awk '$4 != "True"' | \
kubectl get hr -A | awk '$4 != "True"' | \
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/helmreleases/$NAMESPACE/$NAME
mkdir -p $DIR
@@ -105,7 +105,7 @@ kubectl get svc -A --no-headers | awk '$4 == "<pending>"' |
echo "Collecting pvcs..."
kubectl get pvc -A > $REPORT_DIR/kubernetes/pvcs.txt 2>&1
kubectl get pvc -A --no-headers | awk '$3 != "Bound"' |
kubectl get pvc -A | awk '$3 != "Bound"' |
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/pvc/$NAMESPACE/$NAME
mkdir -p $DIR

View File

@@ -24,7 +24,7 @@ run_one() {
echo "╭ » Run test: $title"
START=$(date +%s)
skip_next="+ $fn"
skip_next="+ $fn" # skip the first trace line, which carries the function name
{
(
@@ -83,11 +83,11 @@ awk '
}
printf("### %s\n", title)
printf("%s() {\n", fname)
print " set -e"
print " set -e" # ошибка → падение теста
next
}
/^}$/ {
print " return 0"
print " return 0" # если автор не сделал exit 1 — тест ОК
print "}"
next
}

View File

@@ -81,7 +81,6 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/capacity-p
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-control-plane.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//seaweedfs/seaweedfs.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
EOT

View File

@@ -1,47 +0,0 @@
#!/usr/bin/env bats
@test "Create and Verify Seeweedfs Bucket" {
# Create the bucket resource
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Bucket
metadata:
name: ${name}
namespace: tenant-test
spec: {}
EOF
# Wait for the bucket to be ready
kubectl -n tenant-test wait hr bucket-${name} --timeout=100s --for=condition=ready
kubectl -n tenant-test wait bucketclaims.objectstorage.k8s.io bucket-${name} --timeout=300s --for=jsonpath='{.status.bucketReady}'
kubectl -n tenant-test wait bucketaccesses.objectstorage.k8s.io bucket-${name} --timeout=300s --for=jsonpath='{.status.accessGranted}'
# Get and decode credentials
kubectl -n tenant-test get secret bucket-${name} -ojsonpath='{.data.BucketInfo}' | base64 -d > bucket-test-credentials.json
# Get credentials from the secret
ACCESS_KEY=$(jq -r '.spec.secretS3.accessKeyID' bucket-test-credentials.json)
SECRET_KEY=$(jq -r '.spec.secretS3.accessSecretKey' bucket-test-credentials.json)
BUCKET_NAME=$(jq -r '.spec.bucketName' bucket-test-credentials.json)
# Start port-forwarding
bash -c 'timeout 100s kubectl port-forward service/seaweedfs-s3 -n tenant-root 8333:8333 > /dev/null 2>&1 &'
# Wait for port-forward to be ready
timeout 30 sh -ec 'until nc -z localhost 8333; do sleep 1; done'
# Set up MinIO alias with error handling
mc alias set local https://localhost:8333 $ACCESS_KEY $SECRET_KEY --insecure
# Upload file to bucket
mc cp bucket-test-credentials.json local/$BUCKET_NAME/bucket-test-credentials.json
# Verify file was uploaded
mc ls local/$BUCKET_NAME/bucket-test-credentials.json
# Clean up uploaded file
mc rm local/$BUCKET_NAME/bucket-test-credentials.json
kubectl -n tenant-test delete bucket.apps.cozystack.io ${name}
}

View File

@@ -27,10 +27,6 @@ spec:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
clickhouseKeeper:
enabled: true
resourcesPreset: "micro"
size: "1Gi"
resources: {}
resourcesPreset: "nano"
EOF
@@ -42,5 +38,4 @@ EOF
timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test delete clickhouse $name
}

View File

@@ -1,6 +0,0 @@
#!/usr/bin/env bats
@test "Create a tenant Kubernetes control plane with latest version" {
. hack/e2e-apps/run-kubernetes.sh
run_kubernetes_test 'keys | sort_by(.) | .[-1]' 'test-latest-version' '59991'
}

View File

@@ -1,6 +0,0 @@
#!/usr/bin/env bats
@test "Create a tenant Kubernetes control plane with previous version" {
. hack/e2e-apps/run-kubernetes.sh
run_kubernetes_test 'keys | sort_by(.) | .[-2]' 'test-previous-version' '59992'
}

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bats
@test "Create a tenant Kubernetes control plane" {
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
name: test
namespace: tenant-test
spec:
addons:
certManager:
enabled: false
valuesOverride: {}
cilium:
valuesOverride: {}
fluxcd:
enabled: false
valuesOverride: {}
gatewayAPI:
enabled: false
gpuOperator:
enabled: false
valuesOverride: {}
ingressNginx:
enabled: true
hosts: []
valuesOverride: {}
monitoringAgents:
enabled: false
valuesOverride: {}
verticalPodAutoscaler:
valuesOverride: {}
controlPlane:
apiServer:
resources: {}
resourcesPreset: small
controllerManager:
resources: {}
resourcesPreset: micro
konnectivity:
server:
resources: {}
resourcesPreset: micro
replicas: 2
scheduler:
resources: {}
resourcesPreset: micro
host: ""
nodeGroups:
md0:
ephemeralStorage: 20Gi
gpus: []
instanceType: u1.medium
maxReplicas: 10
minReplicas: 0
resources:
cpu: ""
memory: ""
roles:
- ingress-nginx
storageClass: replicated
EOF
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
}

View File

@@ -1,101 +0,0 @@
run_kubernetes_test() {
local version_expr="$1"
local test_name="$2"
local port="$3"
local k8s_version=$(yq "$version_expr" packages/apps/kubernetes/files/versions.yaml)
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
name: "${test_name}"
namespace: tenant-test
spec:
addons:
certManager:
enabled: false
valuesOverride: {}
cilium:
valuesOverride: {}
fluxcd:
enabled: false
valuesOverride: {}
gatewayAPI:
enabled: false
gpuOperator:
enabled: false
valuesOverride: {}
ingressNginx:
enabled: true
hosts: []
valuesOverride: {}
monitoringAgents:
enabled: false
valuesOverride: {}
verticalPodAutoscaler:
valuesOverride: {}
controlPlane:
apiServer:
resources: {}
resourcesPreset: small
controllerManager:
resources: {}
resourcesPreset: micro
konnectivity:
server:
resources: {}
resourcesPreset: micro
replicas: 2
scheduler:
resources: {}
resourcesPreset: micro
host: ""
nodeGroups:
md0:
ephemeralStorage: 20Gi
gpus: []
instanceType: u1.medium
maxReplicas: 10
minReplicas: 0
roles:
- ingress-nginx
storageClass: replicated
version: "${k8s_version}"
EOF
# Wait for the tenant-test namespace to be active
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
# Wait for the Kamaji control plane to be created (retry for up to 10 seconds)
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-'"${test_name}"'; do sleep 1; done'
# Wait for the tenant control plane to be fully created (timeout after 4 minutes)
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-${test_name} --timeout=4m
# Wait for Kubernetes resources to be ready (timeout after 2 minutes)
kubectl wait tcp -n tenant-test kubernetes-${test_name} --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
# Wait for all required deployments to be available (timeout after 4 minutes)
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-${test_name} kubernetes-${test_name}-cluster-autoscaler kubernetes-${test_name}-kccm kubernetes-${test_name}-kcsi-controller
# Wait for the machine deployment to scale to 2 replicas (timeout after 1 minute)
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
# Get the admin kubeconfig and save it to a file
kubectl get secret kubernetes-${test_name}-admin-kubeconfig -ojsonpath='{.data.super-admin\.conf}' -n tenant-test | base64 -d > tenantkubeconfig
# Update the kubeconfig to use localhost for the API server
yq -i ".clusters[0].cluster.server = \"https://localhost:${port}\"" tenantkubeconfig
# Set up port forwarding to the Kubernetes API server for a 40 second timeout
bash -c 'timeout 40s kubectl port-forward service/kubernetes-'"${test_name}"' -n tenant-test '"${port}"':6443 > /dev/null 2>&1 &'
# Verify the Kubernetes version matches what we expect (retry for up to 20 seconds)
timeout 20 sh -ec 'until kubectl --kubeconfig tenantkubeconfig version 2>/dev/null | grep -Fq "Server Version: '"${k8s_version}"'"; do sleep 5; done'
# Wait for all machine deployment replicas to be ready (timeout after 10 minutes)
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
# Clean up by deleting the Kubernetes resource
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $test_name
}

View File

@@ -20,7 +20,9 @@ spec:
storage: 5Gi
storageClass: replicated
gpus: []
resources: {}
resources:
cpu: ""
memory: ""
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
test@test

View File

@@ -18,8 +18,8 @@ spec:
EOF
sleep 5
kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
kubectl -n tenant-test wait dv vm-disk-$name --timeout=250s --for=condition=ready
kubectl -n tenant-test wait pvc vm-disk-$name --timeout=200s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}
@test "Create a VM Instance" {
@@ -42,6 +42,9 @@ spec:
disks:
- name: $diskName
gpus: []
resources:
cpu: ""
memory: ""
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF
test@test

View File

@@ -123,24 +123,16 @@ EOF
@test "Configure Tenant and wait for applications" {
# Patch root tenant and wait for its releases
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true, "seaweedfs": true}}'
timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready
timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring seaweedfs tenant-root >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/etcd hr/ingress hr/tenant-root hr/seaweedfs -n tenant-root --timeout=4m --for=condition=ready
# TODO: Workaround ingress unavailability issue
if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
fi
if ! kubectl wait hr/seaweedfs-system -n tenant-root --timeout=2m --for=condition=ready; then
flux reconcile hr seaweedfs-system -n tenant-root --force
kubectl wait hr/seaweedfs-system -n tenant-root --timeout=2m --for=condition=ready
fi
# Expose Cozystack services through ingress
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
@@ -152,7 +144,7 @@ EOF
kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m
# VictoriaMetrics components
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=15m
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m
@@ -189,22 +181,9 @@ spec:
ingress: false
isolated: true
monitoring: false
resourceQuotas:
cpu: "60"
memory: "128Gi"
storage: "100Gi"
resourceQuotas: {}
seaweedfs: false
EOF
kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
# Wait for ResourceQuota to appear and assert values
timeout 60 sh -ec 'until [ "$(kubectl get quota -n tenant-test --no-headers 2>/dev/null | wc -l)" -ge 1 ]; do sleep 1; done'
kubectl get quota -n tenant-test \
-o jsonpath='{range .items[*]}{.spec.hard.requests\.memory}{" "}{.spec.hard.requests\.storage}{"\n"}{end}' \
| grep -qx '137438953472 100Gi'
# Assert LimitRange defaults for containers
kubectl get limitrange -n tenant-test \
-o jsonpath='{range .items[*].spec.limits[*]}{.default.cpu}{" "}{.default.memory}{" "}{.defaultRequest.cpu}{" "}{.defaultRequest.memory}{"\n"}{end}' \
| grep -qx '250m 128Mi 25m 128Mi'
}

View File

@@ -82,7 +82,7 @@ EOF
for i in 1 2 3; do
cp nocloud-amd64.raw srv${i}/system.img
qemu-img resize srv${i}/system.img 50G
qemu-img create srv${i}/data.img 200G
qemu-img create srv${i}/data.img 100G
done
}
@@ -132,30 +132,29 @@ machine:
- usermode_helper=disabled
- name: zfs
- name: spl
- name: lldpd
registries:
mirrors:
docker.io:
endpoints:
- https://dockerio.nexus.aenix.org
- https://dockerio.nexus.lllamnyp.su
cr.fluentbit.io:
endpoints:
- https://fluentbit.nexus.aenix.org
- https://fluentbit.nexus.lllamnyp.su
docker-registry3.mariadb.com:
endpoints:
- https://mariadb.nexus.aenix.org
- https://mariadb.nexus.lllamnyp.su
gcr.io:
endpoints:
- https://gcr.nexus.aenix.org
- https://gcr.nexus.lllamnyp.su
ghcr.io:
endpoints:
- https://ghcr.nexus.aenix.org
- https://ghcr.nexus.lllamnyp.su
quay.io:
endpoints:
- https://quay.nexus.aenix.org
- https://quay.nexus.lllamnyp.su
registry.k8s.io:
endpoints:
- https://k8s.nexus.aenix.org
- https://k8s.nexus.lllamnyp.su
files:
- content: |
[plugins]

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Test OpenAPI endpoints in a Kubernetes cluster
# -----------------------------------------------------------------------------
@test "Test OpenAPI v2 endpoint" {
kubectl get -v7 --raw '/openapi/v2?timeout=32s' > /dev/null
}
@test "Test OpenAPI v3 endpoint" {
kubectl get -v7 --raw '/openapi/v3/apis/apps.cozystack.io/v1alpha1' > /dev/null
}
@test "Test OpenAPI v2 endpoint (protobuf)" {
(
kubectl proxy --port=21234 & sleep 0.5
trap "kill $!" EXIT
curl -sS --fail 'http://localhost:21234/openapi/v2?timeout=32s' -H 'Accept: application/com.github.proto-openapi.spec.v2@v1.0+protobuf' > /dev/null
)
}

View File

@@ -32,10 +32,6 @@ kube::codegen::gen_helpers \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
"${SCRIPT_ROOT}/pkg/apis"
kube::codegen::gen_helpers \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
"${SCRIPT_ROOT}/api"
if [[ -n "${API_KNOWN_VIOLATIONS_DIR:-}" ]]; then
report_filename="${API_KNOWN_VIOLATIONS_DIR}/cozystack_api_violation_exceptions.list"
if [[ "${UPDATE_API_KNOWN_VIOLATIONS:-}" == "true" ]]; then

View File

@@ -1,183 +0,0 @@
package controller
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"sort"
"sync"
"time"
"github.com/cozystack/cozystack/internal/shared/crdmem"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type CozystackResourceDefinitionReconciler struct {
client.Client
Scheme *runtime.Scheme
Debounce time.Duration
mu sync.Mutex
lastEvent time.Time
lastHandled time.Time
mem *crdmem.Memory
}
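// Reconcile keeps the in-memory CRD cache in sync and records the event time.
// Events for real CozystackResourceDefinitions are absorbed here; the synthetic
// cozy-system/cozystack-api request enqueued by the watch mapping below is what
// actually drives the debounced rollout.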
func (r *CozystackResourceDefinitionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
crd := &cozyv1alpha1.CozystackResourceDefinition{}
err := r.Get(ctx, types.NamespacedName{Name: req.Name}, crd)
if err == nil {
if r.mem != nil {
r.mem.Upsert(crd)
}
r.mu.Lock()
r.lastEvent = time.Now()
r.mu.Unlock()
return ctrl.Result{}, nil
}
if err != nil && !apierrors.IsNotFound(err) {
return ctrl.Result{}, err
}
if apierrors.IsNotFound(err) && r.mem != nil {
r.mem.Delete(req.Name)
}
if req.Namespace == "cozy-system" && req.Name == "cozystack-api" {
return r.debouncedRestart(ctx, logger)
}
return ctrl.Result{}, nil
}
func (r *CozystackResourceDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Debounce == 0 {
r.Debounce = 5 * time.Second
}
if r.mem == nil {
r.mem = crdmem.Global()
}
if err := r.mem.EnsurePrimingWithManager(mgr); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
Named("cozystackresource-controller").
For(&cozyv1alpha1.CozystackResourceDefinition{}, builder.WithPredicates()).
Watches(
&cozyv1alpha1.CozystackResourceDefinition{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
r.mu.Lock()
r.lastEvent = time.Now()
r.mu.Unlock()
return []reconcile.Request{{
NamespacedName: types.NamespacedName{
Namespace: "cozy-system",
Name: "cozystack-api",
},
}}
}),
).
Complete(r)
}
type crdHashView struct {
Name string `json:"name"`
Spec cozyv1alpha1.CozystackResourceDefinitionSpec `json:"spec"`
}
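// computeConfigHash returns a stable sha256 over the name and spec of every
// cached CozystackResourceDefinition, sorted by name, so equivalent
// configurations always hash identically.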
func (r *CozystackResourceDefinitionReconciler) computeConfigHash() (string, error) {
if r.mem == nil {
return "", nil
}
snapshot := r.mem.Snapshot()
sort.Slice(snapshot, func(i, j int) bool { return snapshot[i].Name < snapshot[j].Name })
views := make([]crdHashView, 0, len(snapshot))
for i := range snapshot {
views = append(views, crdHashView{
Name: snapshot[i].Name,
Spec: snapshot[i].Spec,
})
}
b, err := json.Marshal(views)
if err != nil {
return "", err
}
sum := sha256.Sum256(b)
return hex.EncodeToString(sum[:]), nil
}
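// debouncedRestart waits until Debounce has elapsed since the last CRD event,
// recomputes the config hash, and, only if it changed, patches the
// cozystack.io/config-hash pod template annotation on the cozystack-api
// Deployment to trigger a rolling restart.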
func (r *CozystackResourceDefinitionReconciler) debouncedRestart(ctx context.Context, logger logr.Logger) (ctrl.Result, error) {
r.mu.Lock()
le := r.lastEvent
lh := r.lastHandled
debounce := r.Debounce
r.mu.Unlock()
if debounce <= 0 {
debounce = 5 * time.Second
}
if le.IsZero() {
return ctrl.Result{}, nil
}
if d := time.Since(le); d < debounce {
return ctrl.Result{RequeueAfter: debounce - d}, nil
}
if !lh.Before(le) {
return ctrl.Result{}, nil
}
newHash, err := r.computeConfigHash()
if err != nil {
return ctrl.Result{}, err
}
deploy := &appsv1.Deployment{}
if err := r.Get(ctx, types.NamespacedName{Namespace: "cozy-system", Name: "cozystack-api"}, deploy); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if deploy.Spec.Template.Annotations == nil {
deploy.Spec.Template.Annotations = map[string]string{}
}
oldHash := deploy.Spec.Template.Annotations["cozystack.io/config-hash"]
if oldHash == newHash && oldHash != "" {
r.mu.Lock()
r.lastHandled = le
r.mu.Unlock()
logger.Info("No changes in CRD config; skipping restart", "hash", newHash)
return ctrl.Result{}, nil
}
patch := client.MergeFrom(deploy.DeepCopy())
deploy.Spec.Template.Annotations["cozystack.io/config-hash"] = newHash
if err := r.Patch(ctx, deploy, patch); err != nil {
return ctrl.Result{}, err
}
r.mu.Lock()
r.lastHandled = le
r.mu.Unlock()
logger.Info("Updated cozystack-api podTemplate config-hash; rollout triggered",
"old", oldHash, "new", newHash)
return ctrl.Result{}, nil
}

View File

@@ -1,280 +0,0 @@
package kubeovnplunger
import (
"bytes"
"context"
"fmt"
"io"
"strings"
"time"
"github.com/cozystack/cozystack/internal/sse"
"github.com/cozystack/cozystack/pkg/ovnstatus"
"github.com/prometheus/client_golang/prometheus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var (
srv *sse.Server
)
const (
rescanInterval = 1 * time.Minute
)
// KubeOVNPlunger watches the ovn-central cluster members
type KubeOVNPlunger struct {
client.Client
Scheme *runtime.Scheme
ClientSet kubernetes.Interface
REST *rest.Config
Registry prometheus.Registerer
metrics metrics
lastLeader map[string]string
seenCIDs map[string]map[string]struct{}
}
// Reconcile runs the checks on the ovn-central members to see if their views of the cluster are consistent
func (r *KubeOVNPlunger) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
deploy := &appsv1.Deployment{}
if err := r.Get(ctx, req.NamespacedName, deploy); err != nil {
return ctrl.Result{}, err
}
iphints := map[string]string{}
for _, env := range deploy.Spec.Template.Spec.Containers[0].Env {
if env.Name != "NODE_IPS" {
continue
}
for _, ip := range strings.Split(env.Value, ",") {
iphints[ip] = ""
}
break
}
if len(iphints) == 0 {
l.Info("WARNING: running without IP hints, some error conditions cannot be detected")
}
pods := &corev1.PodList{}
if err := r.List(ctx, pods, client.InNamespace(req.Namespace), client.MatchingLabels(map[string]string{"app": req.Name})); err != nil {
return ctrl.Result{}, fmt.Errorf("list ovn-central pods: %w", err)
}
nbmv := make([]ovnstatus.MemberView, 0, len(pods.Items))
sbmv := make([]ovnstatus.MemberView, 0, len(pods.Items))
nbSnaps := make([]ovnstatus.HealthSnapshot, 0, len(pods.Items))
sbSnaps := make([]ovnstatus.HealthSnapshot, 0, len(pods.Items))
// TODO: get real iphints
for i := range pods.Items {
o := ovnstatus.OVNClient{}
o.ApplyDefaults()
o.Runner = func(ctx context.Context, bin string, args ...string) (string, error) {
cmd := append([]string{bin}, args...)
eo := ExecOptions{
Namespace: req.Namespace,
Pod: pods.Items[i].Name,
Container: pods.Items[i].Spec.Containers[0].Name,
Command: cmd,
}
res, err := r.ExecPod(ctx, eo)
if err != nil {
return "", err
}
return res.Stdout, nil
}
nb, sb, err1, err2 := o.HealthBoth(ctx)
if err1 != nil || err2 != nil {
l.Error(fmt.Errorf("health check failed: nb=%w, sb=%w", err1, err2), "pod", pods.Items[i].Name)
continue
}
nbSnaps = append(nbSnaps, nb)
sbSnaps = append(sbSnaps, sb)
nbmv = append(nbmv, ovnstatus.BuildMemberView(nb))
sbmv = append(sbmv, ovnstatus.BuildMemberView(sb))
}
r.recordAndPruneCIDs("nb", cidFromSnaps(nbSnaps))
r.recordAndPruneCIDs("sb", cidFromSnaps(sbSnaps))
nbmv = ovnstatus.NormalizeViews(nbmv)
sbmv = ovnstatus.NormalizeViews(sbmv)
nbecv := ovnstatus.AnalyzeConsensusWithIPHints(nbmv, &ovnstatus.Hints{ExpectedIPs: iphints})
sbecv := ovnstatus.AnalyzeConsensusWithIPHints(sbmv, &ovnstatus.Hints{ExpectedIPs: iphints})
expected := len(iphints)
r.WriteClusterMetrics("nb", nbSnaps, nbecv, expected)
r.WriteClusterMetrics("sb", sbSnaps, sbecv, expected)
r.WriteMemberMetrics("nb", nbSnaps, nbmv, nbecv)
r.WriteMemberMetrics("sb", sbSnaps, sbmv, sbecv)
srv.Publish(nbecv.PrettyString() + sbecv.PrettyString())
return ctrl.Result{}, nil
}
// SetupWithManager attaches a generic ticker to trigger a reconcile every <interval> seconds
func (r *KubeOVNPlunger) SetupWithManager(mgr ctrl.Manager, kubeOVNNamespace, appName string) error {
r.REST = rest.CopyConfig(mgr.GetConfig())
cs, err := kubernetes.NewForConfig(r.REST)
if err != nil {
return fmt.Errorf("build clientset: %w", err)
}
r.ClientSet = cs
ch := make(chan event.GenericEvent, 10)
mapFunc := func(context.Context, client.Object) []reconcile.Request {
return []reconcile.Request{{
NamespacedName: types.NamespacedName{Namespace: kubeOVNNamespace, Name: appName},
}}
}
mapper := handler.EnqueueRequestsFromMapFunc(mapFunc)
srv = sse.New(sse.Options{
Addr: ":18080",
AllowCORS: true,
})
r.initMetrics()
r.lastLeader = make(map[string]string)
r.seenCIDs = map[string]map[string]struct{}{"nb": {}, "sb": {}}
if err := ctrl.NewControllerManagedBy(mgr).
Named("kubeovnplunger").
WatchesRawSource(source.Channel(ch, mapper)).
Complete(r); err != nil {
return err
}
_ = mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
go srv.ListenAndServe()
<-ctx.Done()
_ = srv.Shutdown(context.Background())
return nil
}))
return mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
ticker := time.NewTicker(rescanInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return nil
case <-ticker.C:
ch <- event.GenericEvent{
Object: &metav1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Namespace: kubeOVNNamespace,
Name: appName,
},
},
}
}
}
}))
}
type ExecOptions struct {
Namespace string
Pod string
Container string
Command []string // e.g. []string{"sh", "-c", "echo hi"}
Stdin io.Reader // optional
TTY bool // if true, stderr is merged into stdout
Timeout time.Duration // optional overall timeout
}
type ExecResult struct {
Stdout string
Stderr string
ExitCode *int // nil if not determinable
}
// ExecPod runs a command in a pod and returns stdout/stderr/exit code.
func (r *KubeOVNPlunger) ExecPod(ctx context.Context, opts ExecOptions) (*ExecResult, error) {
if opts.Namespace == "" || opts.Pod == "" || opts.Container == "" {
return nil, fmt.Errorf("namespace, pod, and container are required")
}
req := r.ClientSet.CoreV1().RESTClient().
Post().
Resource("pods").
Namespace(opts.Namespace).
Name(opts.Pod).
SubResource("exec").
VersionedParams(&corev1.PodExecOptions{
Container: opts.Container,
Command: opts.Command,
Stdin: opts.Stdin != nil,
Stdout: true,
Stderr: !opts.TTY,
TTY: opts.TTY,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(r.REST, "POST", req.URL())
if err != nil {
return nil, fmt.Errorf("spdy executor: %w", err)
}
var stdout, stderr bytes.Buffer
streamCtx := ctx
if opts.Timeout > 0 {
var cancel context.CancelFunc
streamCtx, cancel = context.WithTimeout(ctx, opts.Timeout)
defer cancel()
}
streamErr := exec.StreamWithContext(streamCtx, remotecommand.StreamOptions{
Stdin: opts.Stdin,
Stdout: &stdout,
Stderr: &stderr,
Tty: opts.TTY,
})
res := &ExecResult{Stdout: stdout.String(), Stderr: stderr.String()}
if streamErr != nil {
// Try to surface exit code instead of treating all failures as transport errors
type exitCoder interface{ ExitStatus() int }
if ec, ok := streamErr.(exitCoder); ok {
code := ec.ExitStatus()
res.ExitCode = &code
return res, nil
}
return res, fmt.Errorf("exec stream: %w", streamErr)
}
zero := 0
res.ExitCode = &zero
return res, nil
}
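// recordAndPruneCIDs remembers which cluster IDs have been seen for a DB and
// deletes every metric series belonging to CIDs that are no longer active, so
// a rebuilt cluster does not leave stale series behind.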
func (r *KubeOVNPlunger) recordAndPruneCIDs(db, currentCID string) {
// Mark current as seen
if r.seenCIDs[db] == nil {
r.seenCIDs[db] = map[string]struct{}{}
}
if currentCID != "" {
r.seenCIDs[db][currentCID] = struct{}{}
}
// Build a set of "still active" CIDs this cycle (could be none if you failed to collect)
active := map[string]struct{}{}
if currentCID != "" {
active[currentCID] = struct{}{}
}
// Any seen CID that isn't active now is stale -> delete all its series
for cid := range r.seenCIDs[db] {
if _, ok := active[cid]; ok {
continue
}
r.deleteAllFor(db, cid)
delete(r.seenCIDs[db], cid)
}
}

View File

@@ -1,34 +0,0 @@
package kubeovnplunger
import (
"context"
"testing"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
)
var testPlunger *KubeOVNPlunger
func init() {
scheme := runtime.NewScheme()
cfg := config.GetConfigOrDie()
c, _ := client.New(cfg, client.Options{})
cs, _ := kubernetes.NewForConfig(cfg)
testPlunger = &KubeOVNPlunger{
Client: c,
Scheme: scheme,
ClientSet: cs,
REST: cfg,
}
}
func TestPlungerGetsStatuses(t *testing.T) {
_, err := testPlunger.Reconcile(context.Background(), ctrl.Request{})
if err != nil {
t.Errorf("error should be nil but it's %s", err)
}
}

View File

@@ -1,423 +0,0 @@
package kubeovnplunger
import (
"time"
"github.com/cozystack/cozystack/pkg/ovnstatus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type metrics struct {
// --- Core cluster health (per DB/cid) ---
clusterQuorum *prometheus.GaugeVec // 1/0
allAgree *prometheus.GaugeVec // 1/0
membersExpected *prometheus.GaugeVec
membersObserved *prometheus.GaugeVec
ipsExpected *prometheus.GaugeVec
ipsObserved *prometheus.GaugeVec
excessMembers *prometheus.GaugeVec
missingMembers *prometheus.GaugeVec
unexpectedIPsCount *prometheus.GaugeVec
missingExpectedIPsCount *prometheus.GaugeVec
ipConflictsCount *prometheus.GaugeVec
sidAddrDisagreements *prometheus.GaugeVec
// --- Consensus summary (per DB/cid) ---
consensusMajoritySize *prometheus.GaugeVec
consensusMinoritySize *prometheus.GaugeVec
consensusDiffsTotal *prometheus.GaugeVec
// --- Detail exports (sparse, keyed by IP/SID) ---
unexpectedIPGauge *prometheus.GaugeVec // {db,cid,ip} -> 1
missingExpectedIPGauge *prometheus.GaugeVec // {db,cid,ip} -> 1
ipConflictGauge *prometheus.GaugeVec // {db,cid,ip} -> count(sids)
suspectStaleGauge *prometheus.GaugeVec // {db,cid,sid} -> 1
// --- Per-member liveness/freshness (per DB/cid/sid[/ip]) ---
memberConnected *prometheus.GaugeVec // {db,cid,sid,ip}
memberLeader *prometheus.GaugeVec // {db,cid,sid}
memberLastMsgMs *prometheus.GaugeVec // {db,cid,sid}
memberIndex *prometheus.GaugeVec // {db,cid,sid}
memberIndexGap *prometheus.GaugeVec // {db,cid,sid}
memberReporter *prometheus.GaugeVec // {db,cid,sid}
memberMissingReporter *prometheus.GaugeVec // {db,cid,sid}
// --- Ops/housekeeping ---
leaderTransitionsTotal *prometheus.CounterVec // {db,cid}
collectErrorsTotal *prometheus.CounterVec // {db,cid}
publishEventsTotal *prometheus.CounterVec // {db,cid}
snapshotTimestampSec *prometheus.GaugeVec // {db,cid}
}
func (r *KubeOVNPlunger) initMetrics() {
p := promauto.With(r.Registry)
ns := "ovn"
// --- Core cluster health ---
r.metrics.clusterQuorum = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "quorum",
Help: "1 if cluster has quorum, else 0",
}, []string{"db", "cid"})
r.metrics.allAgree = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "all_agree",
Help: "1 if all members report identical membership",
}, []string{"db", "cid"})
r.metrics.membersExpected = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "members_expected",
Help: "Expected cluster size (replicas)",
}, []string{"db", "cid"})
r.metrics.membersObserved = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "members_observed",
Help: "Observed members (distinct SIDs across views)",
}, []string{"db", "cid"})
r.metrics.ipsExpected = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "ips_expected",
Help: "Expected distinct member IPs (from k8s hints)",
}, []string{"db", "cid"})
r.metrics.ipsObserved = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "ips_observed",
Help: "Observed distinct member IPs (from OVN views)",
}, []string{"db", "cid"})
r.metrics.excessMembers = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "excess_members",
Help: "Members over expected (>=0)",
}, []string{"db", "cid"})
r.metrics.missingMembers = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "missing_members",
Help: "Members short of expected (>=0)",
}, []string{"db", "cid"})
r.metrics.unexpectedIPsCount = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "unexpected_ips",
Help: "Count of IPs in OVN not present in k8s expected set",
}, []string{"db", "cid"})
r.metrics.missingExpectedIPsCount = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "missing_expected_ips",
Help: "Count of expected IPs not found in OVN",
}, []string{"db", "cid"})
r.metrics.ipConflictsCount = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "ip_conflicts",
Help: "Number of IPs claimed by multiple SIDs",
}, []string{"db", "cid"})
r.metrics.sidAddrDisagreements = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "cluster", Name: "sid_address_disagreements",
Help: "Number of SIDs seen with >1 distinct addresses",
}, []string{"db", "cid"})
// --- Consensus summary ---
r.metrics.consensusMajoritySize = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "consensus", Name: "majority_size",
Help: "Majority group size (0 if none)",
}, []string{"db", "cid"})
r.metrics.consensusMinoritySize = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "consensus", Name: "minority_size",
Help: "Minority group size",
}, []string{"db", "cid"})
r.metrics.consensusDiffsTotal = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "consensus", Name: "diffs_total",
Help: "Total per-reporter differences vs truth (missing + extra + mismatches)",
}, []string{"db", "cid"})
// --- Detail exports (sparse) ---
r.metrics.unexpectedIPGauge = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "consensus", Name: "unexpected_ip",
Help: "Unexpected IP present in OVN; value fixed at 1",
}, []string{"db", "cid", "ip"})
r.metrics.missingExpectedIPGauge = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "consensus", Name: "missing_expected_ip",
Help: "Expected IP missing from OVN; value fixed at 1",
}, []string{"db", "cid", "ip"})
r.metrics.ipConflictGauge = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "consensus", Name: "ip_conflict",
Help: "Number of SIDs claiming the same IP for this key",
}, []string{"db", "cid", "ip"})
r.metrics.suspectStaleGauge = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "consensus", Name: "suspect_stale",
Help: "Suspected stale SID candidate for kick; value fixed at 1 (emit only when remediation is warranted)",
}, []string{"db", "cid", "sid"})
// --- Per-member liveness/freshness ---
r.metrics.memberConnected = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "member", Name: "connected",
Help: "1 if local server reports connected/quorum, else 0",
}, []string{"db", "cid", "sid", "ip"})
r.metrics.memberLeader = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "member", Name: "leader",
Help: "1 if this member is leader, else 0",
}, []string{"db", "cid", "sid"})
r.metrics.memberLastMsgMs = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "member", Name: "last_msg_ms",
Help: "Follower->leader 'last msg' age in ms (legacy heuristic). NaN/omit if unknown",
}, []string{"db", "cid", "sid"})
r.metrics.memberIndex = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "member", Name: "index",
Help: "Local Raft log index",
}, []string{"db", "cid", "sid"})
r.metrics.memberIndexGap = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "member", Name: "index_gap",
Help: "Leader index minus local index (>=0)",
}, []string{"db", "cid", "sid"})
r.metrics.memberReporter = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "member", Name: "reporter",
Help: "1 if a self-view from this SID was collected in the scrape cycle",
}, []string{"db", "cid", "sid"})
r.metrics.memberMissingReporter = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "member", Name: "missing_reporter",
Help: "1 if SID appears in union but produced no self-view",
}, []string{"db", "cid", "sid"})
// --- Ops/housekeeping ---
r.metrics.leaderTransitionsTotal = p.NewCounterVec(prometheus.CounterOpts{
Namespace: ns, Subsystem: "ops", Name: "leader_transitions_total",
Help: "Count of observed leader SID changes",
}, []string{"db", "cid"})
r.metrics.collectErrorsTotal = p.NewCounterVec(prometheus.CounterOpts{
Namespace: ns, Subsystem: "ops", Name: "collect_errors_total",
Help: "Count of errors during health collection/analysis",
}, []string{"db", "cid"})
r.metrics.publishEventsTotal = p.NewCounterVec(prometheus.CounterOpts{
Namespace: ns, Subsystem: "ops", Name: "publish_events_total",
Help: "Count of SSE publish events (optional)",
}, []string{"db", "cid"})
r.metrics.snapshotTimestampSec = p.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Subsystem: "ops", Name: "snapshot_timestamp_seconds",
Help: "Unix timestamp of the last successful consensus snapshot",
}, []string{"db", "cid"})
}
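// WriteClusterMetrics publishes the cluster-level health and consensus gauges
// for one DB (nb or sb) from the collected snapshots and the analysis result.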
func (r *KubeOVNPlunger) WriteClusterMetrics(db string, snaps []ovnstatus.HealthSnapshot, ecv ovnstatus.ExtendedConsensusResult, expectedReplicas int) {
cid := cidFromSnaps(snaps)
// Core cluster health
r.metrics.clusterQuorum.WithLabelValues(db, cid).Set(b2f(ecv.HasMajority))
r.metrics.allAgree.WithLabelValues(db, cid).Set(b2f(ecv.AllAgree))
r.metrics.membersExpected.WithLabelValues(db, cid).Set(float64(expectedReplicas))
r.metrics.membersObserved.WithLabelValues(db, cid).Set(float64(ecv.MembersCount))
r.metrics.ipsExpected.WithLabelValues(db, cid).Set(float64(len(ecv.ConsensusResult.TruthView.Members))) // optional; or len(hints.ExpectedIPs)
r.metrics.ipsObserved.WithLabelValues(db, cid).Set(float64(ecv.DistinctIPCount))
r.metrics.excessMembers.WithLabelValues(db, cid).Set(float64(ecv.ExpectedExcess))
r.metrics.missingMembers.WithLabelValues(db, cid).Set(float64(ecv.ExpectedShortfall))
r.metrics.unexpectedIPsCount.WithLabelValues(db, cid).Set(float64(len(ecv.UnexpectedIPs)))
r.metrics.missingExpectedIPsCount.WithLabelValues(db, cid).Set(float64(len(ecv.MissingExpectedIPs)))
r.metrics.ipConflictsCount.WithLabelValues(db, cid).Set(float64(len(ecv.IPConflicts)))
// Count SIDs with >1 distinct addresses
disagree := 0
for _, n := range ecv.SIDAddressDisagreements {
if n > 1 {
disagree++
}
}
r.metrics.sidAddrDisagreements.WithLabelValues(db, cid).Set(float64(disagree))
// Consensus summary
r.metrics.consensusMajoritySize.WithLabelValues(db, cid).Set(float64(len(ecv.MajorityMembers)))
r.metrics.consensusMinoritySize.WithLabelValues(db, cid).Set(float64(len(ecv.MinorityMembers)))
// Sum diffs across reporters (missing + extra + mismatches)
totalDiffs := 0
for _, d := range ecv.Diffs {
totalDiffs += len(d.MissingSIDs) + len(d.ExtraSIDs) + len(d.AddressMismatches)
}
r.metrics.consensusDiffsTotal.WithLabelValues(db, cid).Set(float64(totalDiffs))
// Sparse per-key exports (reset then re-emit for this {db,cid})
r.metrics.unexpectedIPGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
for _, ip := range ecv.UnexpectedIPs {
r.metrics.unexpectedIPGauge.WithLabelValues(db, cid, ip).Set(1)
}
r.metrics.missingExpectedIPGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
for _, ip := range ecv.MissingExpectedIPs {
r.metrics.missingExpectedIPGauge.WithLabelValues(db, cid, ip).Set(1)
}
r.metrics.ipConflictGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
for ip, sids := range ecv.IPConflicts {
r.metrics.ipConflictGauge.WithLabelValues(db, cid, ip).Set(float64(len(sids)))
}
// Only emit suspects when remediation is warranted (e.g., TooManyMembers / unexpected IPs / conflicts)
r.metrics.suspectStaleGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
if ecv.TooManyMembers || len(ecv.UnexpectedIPs) > 0 || len(ecv.IPConflicts) > 0 {
for _, sid := range ecv.SuspectStaleSIDs {
r.metrics.suspectStaleGauge.WithLabelValues(db, cid, sid).Set(1)
}
}
// Snapshot timestamp
r.metrics.snapshotTimestampSec.WithLabelValues(db, cid).Set(float64(time.Now().Unix()))
}
func (r *KubeOVNPlunger) WriteMemberMetrics(db string, snaps []ovnstatus.HealthSnapshot, views []ovnstatus.MemberView, ecv ovnstatus.ExtendedConsensusResult) {
cid := cidFromSnaps(snaps)
// Figure out current leader SID (prefer local view from any leader snapshot)
curLeader := ""
for _, s := range snaps {
if s.Local.Leader {
curLeader = s.Local.SID
break
}
}
// Leader transitions
key := db + "|" + cid
if prev, ok := r.lastLeader[key]; ok && prev != "" && curLeader != "" && prev != curLeader {
r.metrics.leaderTransitionsTotal.WithLabelValues(db, cid).Inc()
}
if curLeader != "" {
r.lastLeader[key] = curLeader
}
// Build quick maps for reporter set & IP per SID (best-effort)
reporter := map[string]struct{}{}
for _, v := range views {
if v.FromSID != "" {
reporter[v.FromSID] = struct{}{}
}
}
sidToIP := map[string]string{}
for _, v := range views {
for sid, addr := range v.Members {
if sidToIP[sid] == "" && addr != "" {
sidToIP[sid] = ovnstatus.AddrToIP(addr) // expose addrToIP or wrap here
}
}
}
// Reset member vectors for this {db,cid} (avoid stale series)
r.metrics.memberConnected.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberLeader.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberLastMsgMs.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberIndex.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberIndexGap.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberReporter.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberMissingReporter.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
// Leader index (to compute gaps)
lIdx := leaderIndex(snaps, curLeader)
// Emit one series per snapshot (self view)
for _, s := range snaps {
sid := s.Local.SID
ip := sidToIP[sid]
if ip == "" {
ip = "unknown"
}
r.metrics.memberConnected.WithLabelValues(db, cid, sid, ip).Set(b2f(s.Local.Connected))
r.metrics.memberLeader.WithLabelValues(db, cid, sid).Set(b2f(s.Local.Leader))
r.metrics.memberIndex.WithLabelValues(db, cid, sid).Set(float64(s.Local.Index))
if lIdx != nil && s.Local.Index >= 0 {
gap := *lIdx - s.Local.Index
if gap < 0 {
gap = 0
}
r.metrics.memberIndexGap.WithLabelValues(db, cid, sid).Set(float64(gap))
}
// Reporter presence
_, isReporter := reporter[sid]
r.metrics.memberReporter.WithLabelValues(db, cid, sid).Set(b2f(isReporter))
}
// Missing-reporter SIDs: members present in the union view (from ecv) that never reported a view themselves
reporterSet := map[string]struct{}{}
for sid := range reporter {
reporterSet[sid] = struct{}{}
}
unionSet := map[string]struct{}{}
for _, sid := range ecv.UnionMembers {
unionSet[sid] = struct{}{}
}
for sid := range unionSet {
if _, ok := reporterSet[sid]; !ok {
r.metrics.memberMissingReporter.WithLabelValues(db, cid, sid).Set(1)
}
}
// Legacy follower freshness (only if LastMsgMs is still populated by the servers parsing).
// LastMsgMs comes from Full.Servers in each snapshot; keep the smallest value per SID, i.e. the most recent contact.
lastMsg := map[string]int64{}
for _, s := range snaps {
for _, srv := range s.Full.Servers {
if srv.LastMsgMs != nil {
cur, ok := lastMsg[srv.SID]
if !ok || *srv.LastMsgMs < cur {
lastMsg[srv.SID] = *srv.LastMsgMs
}
}
}
}
for sid, ms := range lastMsg {
r.metrics.memberLastMsgMs.WithLabelValues(db, cid, sid).Set(float64(ms))
}
}
func (r *KubeOVNPlunger) deleteAllFor(db, cid string) {
// Cluster-level vecs (db,cid)
r.metrics.clusterQuorum.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.allAgree.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.membersExpected.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.membersObserved.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.ipsExpected.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.ipsObserved.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.excessMembers.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.missingMembers.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.unexpectedIPsCount.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.missingExpectedIPsCount.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.ipConflictsCount.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.sidAddrDisagreements.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.consensusMajoritySize.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.consensusMinoritySize.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.consensusDiffsTotal.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
// Sparse detail vecs (db,cid,*)
r.metrics.unexpectedIPGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.missingExpectedIPGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.ipConflictGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.suspectStaleGauge.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
// Per-member vecs (db,cid,*)
r.metrics.memberConnected.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberLeader.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberLastMsgMs.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberIndex.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberIndexGap.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberReporter.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.memberMissingReporter.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
// Ops vecs (db,cid)
r.metrics.leaderTransitionsTotal.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.collectErrorsTotal.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.publishEventsTotal.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
r.metrics.snapshotTimestampSec.DeletePartialMatch(prometheus.Labels{"db": db, "cid": cid})
}
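The pattern used throughout this file — `DeletePartialMatch` over a `{db,cid}` label prefix followed by re-emitting only the currently observed keys — is what keeps the sparse per-IP and per-SID gauges from accumulating stale series. A minimal, self-contained sketch of the same pattern (the metric and label values here are illustrative, not the ones registered above):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Sparse gauge: one series per currently-unexpected IP.
	vec := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "demo_unexpected_ip",
		Help: "1 for each IP currently considered unexpected",
	}, []string{"db", "cid", "ip"})
	prometheus.MustRegister(vec)

	emit := func(ips []string) {
		// Reset: drop every series for this {db,cid} so keys that
		// disappeared since the last pass do not linger as stale 1s.
		vec.DeletePartialMatch(prometheus.Labels{"db": "nb", "cid": "c1"})
		// Re-emit: set only the keys present in the current snapshot.
		for _, ip := range ips {
			vec.WithLabelValues("nb", "c1", ip).Set(1)
		}
	}

	emit([]string{"10.0.0.5", "10.0.0.9"}) // exports two series
	emit([]string{"10.0.0.9"})             // the 10.0.0.5 series is removed
	fmt.Println("only currently-unexpected IPs remain exported")
}
```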

View File

@@ -1,31 +0,0 @@
package kubeovnplunger
import "github.com/cozystack/cozystack/pkg/ovnstatus"
func b2f(b bool) float64 {
if b {
return 1
}
return 0
}
// cidFromSnaps pulls the cluster UUID (cid) from any snapshot's Local.CID (falls back to "")
func cidFromSnaps(snaps []ovnstatus.HealthSnapshot) string {
for _, s := range snaps {
if s.Local.CID != "" {
return s.Local.CID
}
}
return ""
}
// leaderIndex returns the leader's last local index (nil if unknown), used to compute follower index gaps
func leaderIndex(snaps []ovnstatus.HealthSnapshot, leaderSID string) (idx *int64) {
for _, s := range snaps {
if s.Local.SID == leaderSID && s.Local.Index > 0 {
v := s.Local.Index
return &v
}
}
return nil
}

View File

@@ -33,7 +33,6 @@ const requestedAt = "reconcile.fluxcd.io/requestedAt"
func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)
time.Sleep(2 * time.Second)
digest, err := r.computeDigest(ctx)
if err != nil {
@@ -55,7 +54,6 @@ func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Reques
if !isSystemApp && !isTenantRoot {
continue
}
patchTarget := hr.DeepCopy()
if hr.Annotations == nil {
hr.Annotations = map[string]string{}
@@ -64,12 +62,13 @@ func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Reques
if hr.Annotations[digestAnnotation] == digest {
continue
}
patchTarget.Annotations[digestAnnotation] = digest
patchTarget.Annotations[forceReconcileKey] = now
patchTarget.Annotations[requestedAt] = now
patch := client.MergeFrom(hr.DeepCopy())
if err := r.Patch(ctx, patchTarget, patch); err != nil {
hr.Annotations[digestAnnotation] = digest
hr.Annotations[forceReconcileKey] = now
hr.Annotations[requestedAt] = now
if err := r.Patch(ctx, &hr, patch); err != nil {
log.Error(err, "failed to patch HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
continue
}
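For reference, a hedged sketch of the annotate-and-patch idiom both versions of this hunk rely on: compute the merge patch against a snapshot taken before mutation, then patch the live object so only the annotation changes are transmitted. The digest and force-reconcile annotation keys and the helm-controller API version imported here are assumptions, not the repository's actual constants:

```go
package patchdemo

import (
	"context"

	helmv2 "github.com/fluxcd/helm-controller/api/v2beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// forceReconcile stamps digest and force-reconcile annotations on a
// HelmRelease and sends a merge patch computed against the pre-mutation
// snapshot.
func forceReconcile(ctx context.Context, c client.Client, hr *helmv2.HelmRelease, digest, now string) error {
	patch := client.MergeFrom(hr.DeepCopy()) // snapshot before mutating
	if hr.Annotations == nil {
		hr.Annotations = map[string]string{}
	}
	hr.Annotations["cozystack.io/values-digest"] = digest   // hypothetical key
	hr.Annotations["reconcile.fluxcd.io/forceAt"] = now     // hypothetical key
	hr.Annotations["reconcile.fluxcd.io/requestedAt"] = now // matches the constant above
	return c.Patch(ctx, hr, patch)
}
```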

View File

@@ -26,7 +26,6 @@ type TenantHelmReconciler struct {
func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
time.Sleep(2 * time.Second)
hr := &helmv2.HelmRelease{}
if err := r.Get(ctx, req.NamespacedName, hr); err != nil {

View File

@@ -3,11 +3,11 @@ package controller
import (
"context"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -15,50 +15,63 @@ import (
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)
const (
deletionRequeueDelay = 30 * time.Second
)
// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// workload_controller.go
func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)
logger := log.FromContext(ctx)
w := &cozyv1alpha1.Workload{}
if err := r.Get(ctx, req.NamespacedName, w); err != nil {
err := r.Get(ctx, req.NamespacedName, w)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "Unable to fetch Workload")
return ctrl.Result{}, err
}
// it's being deleted, nothing to handle
if w.DeletionTimestamp != nil {
return ctrl.Result{}, nil
}
t := getMonitoredObject(w)
if t == nil {
err = r.Delete(ctx, w)
if err != nil {
logger.Error(err, "failed to delete workload")
}
return ctrl.Result{}, err
}
// If my monitor is gone, delete me.
monName, has := w.Labels["workloads.cozystack.io/monitor"]
if !has {
return ctrl.Result{}, client.IgnoreNotFound(r.Delete(ctx, w))
err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)
// found object, nothing to do
if err == nil {
return ctrl.Result{}, nil
}
monitor := &cozyv1alpha1.WorkloadMonitor{}
if err := r.Get(ctx, client.ObjectKey{Namespace: w.Namespace, Name: monName}, monitor); apierrors.IsNotFound(err) {
// Monitor is gone → delete the Workload. Ignore NotFound here, too.
return ctrl.Result{}, client.IgnoreNotFound(r.Delete(ctx, w))
} else if err != nil {
// Some other error fetching the monitor
log.Error(err, "failed to get WorkloadMonitor", "monitor", monName)
// error getting object but not 404 -- requeue
if !apierrors.IsNotFound(err) {
logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
err = r.Delete(ctx, w)
if err != nil {
logger.Error(err, "failed to delete workload")
}
return ctrl.Result{}, err
}
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
// Watch Workload objects
// Watch WorkloadMonitor objects
For(&cozyv1alpha1.Workload{}).
Complete(r)
}
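A hedged wiring sketch for a reconciler of this shape: build a scheme, create a manager, and register the reconciler via SetupWithManager. The controller package import path is an assumption:

```go
package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	"github.com/cozystack/cozystack/internal/controller" // assumed import path
)

func main() {
	scheme := runtime.NewScheme()
	_ = cozyv1alpha1.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}
	r := &controller.WorkloadReconciler{Client: mgr.GetClient(), Scheme: scheme}
	if err := r.SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}
	// Blocks until the process receives SIGTERM/SIGINT.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
```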

View File

@@ -1,17 +1,10 @@
package controller
import (
"context"
"testing"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
@@ -31,83 +24,3 @@ func TestPodMonitoredObject(t *testing.T) {
t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
}
}
func TestWorkloadReconciler_DeletesOnMissingMonitor(t *testing.T) {
scheme := runtime.NewScheme()
_ = cozyv1alpha1.AddToScheme(scheme)
_ = corev1.AddToScheme(scheme)
// Workload with a non-existent monitor
w := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-foo",
Namespace: "default",
Labels: map[string]string{
"workloadmonitor.cozystack.io/name": "missing-monitor",
},
},
}
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(w).
Build()
reconciler := &WorkloadReconciler{Client: fakeClient}
req := reconcile.Request{NamespacedName: types.NamespacedName{Name: "pod-foo", Namespace: "default"}}
if _, err := reconciler.Reconcile(context.TODO(), req); err != nil {
t.Fatalf("Reconcile returned error: %v", err)
}
// Expect workload to be deleted
err := fakeClient.Get(context.TODO(), req.NamespacedName, &cozyv1alpha1.Workload{})
if !apierrors.IsNotFound(err) {
t.Errorf("expected workload to be deleted, got: %v", err)
}
}
func TestWorkloadReconciler_KeepsWhenAllExist(t *testing.T) {
scheme := runtime.NewScheme()
_ = cozyv1alpha1.AddToScheme(scheme)
_ = corev1.AddToScheme(scheme)
// Create a monitor and its backing Pod
monitor := &cozyv1alpha1.WorkloadMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: "mon",
Namespace: "default",
},
}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
}
w := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-foo",
Namespace: "default",
Labels: map[string]string{
"workloadmonitor.cozystack.io/name": "mon",
},
},
}
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(monitor, pod, w).
Build()
reconciler := &WorkloadReconciler{Client: fakeClient}
req := reconcile.Request{NamespacedName: types.NamespacedName{Name: "pod-foo", Namespace: "default"}}
if _, err := reconciler.Reconcile(context.TODO(), req); err != nil {
t.Fatalf("Reconcile returned error: %v", err)
}
// Expect workload to remain
err := fakeClient.Get(context.TODO(), req.NamespacedName, &cozyv1alpha1.Workload{})
if err != nil {
t.Errorf("expected workload to persist, got error: %v", err)
}
}

View File

@@ -9,7 +9,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -112,7 +111,6 @@ func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("svc-%s", svc.Name),
Namespace: svc.Namespace,
Labels: make(map[string]string, len(svc.Labels)),
},
}
@@ -139,12 +137,9 @@ func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), &svc)
updateOwnerReferences(workload.GetObjectMeta(), monitor)
for k, v := range svc.Labels {
workload.Labels[k] = v
}
workload.Labels["workloads.cozystack.io/monitor"] = monitor.Name
workload.Labels = svc.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
@@ -173,7 +168,6 @@ func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pvc-%s", pvc.Name),
Namespace: pvc.Namespace,
Labels: make(map[string]string, len(pvc.Labels)),
},
}
@@ -190,12 +184,9 @@ func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), &pvc)
updateOwnerReferences(workload.GetObjectMeta(), monitor)
for k, v := range pvc.Labels {
workload.Labels[k] = v
}
workload.Labels["workloads.cozystack.io/monitor"] = monitor.Name
workload.Labels = pvc.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
@@ -257,19 +248,19 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-%s", pod.Name),
Namespace: pod.Namespace,
Labels: make(map[string]string, len(pod.Labels)),
Labels: map[string]string{},
},
}
metaLabels := r.getWorkloadMetadata(&pod)
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), &pod)
updateOwnerReferences(workload.GetObjectMeta(), monitor)
// Copy labels from the Pod if needed
for k, v := range pod.Labels {
workload.Labels[k] = v
}
workload.Labels["workloads.cozystack.io/monitor"] = monitor.Name
// Add workload meta to labels
for k, v := range metaLabels {
@@ -379,24 +370,15 @@ func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Requ
monitor.Status.ObservedReplicas = observedReplicas
monitor.Status.AvailableReplicas = availableReplicas
// Update the WorkloadMonitor status in the cluster
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
fresh := &cozyv1alpha1.WorkloadMonitor{}
if err := r.Get(ctx, req.NamespacedName, fresh); err != nil {
return err
}
fresh.Status.ObservedReplicas = observedReplicas
fresh.Status.AvailableReplicas = availableReplicas
// Default to operational = true, but check MinReplicas if set
monitor.Status.Operational = pointer.Bool(true)
if monitor.Spec.MinReplicas != nil && availableReplicas < *monitor.Spec.MinReplicas {
monitor.Status.Operational = pointer.Bool(false)
}
// Default to operational = true, but check MinReplicas if set
monitor.Status.Operational = pointer.Bool(true)
if monitor.Spec.MinReplicas != nil && availableReplicas < *monitor.Spec.MinReplicas {
monitor.Status.Operational = pointer.Bool(false)
}
return r.Status().Update(ctx, fresh)
})
if err != nil {
logger.Error(err, "unable to update WorkloadMonitor status after retries")
// Update the WorkloadMonitor status in the cluster
if err := r.Status().Update(ctx, monitor); err != nil {
logger.Error(err, "Unable to update WorkloadMonitor status", "monitor", monitor.Name)
return ctrl.Result{}, err
}

View File

@@ -1,99 +0,0 @@
package crdmem
import (
"context"
"sync"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type Memory struct {
mu sync.RWMutex
data map[string]cozyv1alpha1.CozystackResourceDefinition
primed bool
primeOnce sync.Once
}
func New() *Memory {
return &Memory{data: make(map[string]cozyv1alpha1.CozystackResourceDefinition)}
}
var (
global *Memory
globalOnce sync.Once
)
func Global() *Memory {
globalOnce.Do(func() { global = New() })
return global
}
func (m *Memory) Upsert(obj *cozyv1alpha1.CozystackResourceDefinition) {
if obj == nil {
return
}
m.mu.Lock()
m.data[obj.Name] = *obj.DeepCopy()
m.mu.Unlock()
}
func (m *Memory) Delete(name string) {
m.mu.Lock()
delete(m.data, name)
m.mu.Unlock()
}
func (m *Memory) Snapshot() []cozyv1alpha1.CozystackResourceDefinition {
m.mu.RLock()
defer m.mu.RUnlock()
out := make([]cozyv1alpha1.CozystackResourceDefinition, 0, len(m.data))
for _, v := range m.data {
out = append(out, v)
}
return out
}
func (m *Memory) IsPrimed() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.primed
}
type runnable func(context.Context) error
func (r runnable) Start(ctx context.Context) error { return r(ctx) }
func (m *Memory) EnsurePrimingWithManager(mgr ctrl.Manager) error {
var errOut error
m.primeOnce.Do(func() {
errOut = mgr.Add(runnable(func(ctx context.Context) error {
if ok := mgr.GetCache().WaitForCacheSync(ctx); !ok {
return nil
}
var list cozyv1alpha1.CozystackResourceDefinitionList
if err := mgr.GetClient().List(ctx, &list); err == nil {
for i := range list.Items {
m.Upsert(&list.Items[i])
}
m.mu.Lock()
m.primed = true
m.mu.Unlock()
}
return nil
}))
})
return errOut
}
func (m *Memory) ListFromCacheOrAPI(ctx context.Context, c client.Client) ([]cozyv1alpha1.CozystackResourceDefinition, error) {
if m.IsPrimed() {
return m.Snapshot(), nil
}
var list cozyv1alpha1.CozystackResourceDefinitionList
if err := c.List(ctx, &list); err != nil {
return nil, err
}
return list.Items, nil
}
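A hedged usage sketch for the cache above: prime it once through the manager, then read through ListFromCacheOrAPI so callers get the cheap in-memory snapshot after priming and a live API list before it. The crdmem import path is an assumption:

```go
package wiring

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	"github.com/cozystack/cozystack/pkg/crdmem" // assumed import path
)

// wireCRDMemory registers cache priming with the manager and returns a
// list function that serves from memory once primed, falling back to a
// direct API list until then.
func wireCRDMemory(mgr ctrl.Manager) (func(context.Context) ([]cozyv1alpha1.CozystackResourceDefinition, error), error) {
	mem := crdmem.Global()
	if err := mem.EnsurePrimingWithManager(mgr); err != nil {
		return nil, err
	}
	return func(ctx context.Context) ([]cozyv1alpha1.CozystackResourceDefinition, error) {
		return mem.ListFromCacheOrAPI(ctx, mgr.GetClient())
	}, nil
}
```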

View File

@@ -1,293 +0,0 @@
// Package sse provides a tiny Server-Sent Events server with pluggable routes.
// No external deps; safe for quick demos and small dashboards.
package sse
import (
"context"
"fmt"
"html/template"
"log"
"net/http"
"strings"
"sync"
"time"
)
// Options configures the SSE server.
type Options struct {
// Addr is the listening address, e.g. ":8080" or "127.0.0.1:0".
Addr string
// IndexPath is the path serving a minimal live HTML page ("" to disable).
// e.g. "/" or "/status"
IndexPath string
// StreamPath is the SSE endpoint path, e.g. "/stream".
StreamPath string
// Title for the index page (cosmetic).
Title string
// AllowCORS, if true, sets Access-Control-Allow-Origin: * for /stream.
AllowCORS bool
// ClientBuf is the per-client buffered message queue size.
// If 0, defaults to 16. When full, new messages are dropped for that client.
ClientBuf int
// Heartbeat sends a comment line every interval to keep connections alive.
// If 0, defaults to 25s.
Heartbeat time.Duration
// Logger (optional). If nil, log.Printf is used.
Logger *log.Logger
}
// Server is a simple SSE broadcaster.
type Server struct {
opts Options
mux *http.ServeMux
http *http.Server
clientsMu sync.RWMutex
clients map[*client]struct{}
// latest holds the most recent payload (sent to new clients on connect).
latestMu sync.RWMutex
latest string
}
type client struct {
ch chan string
closeCh chan struct{}
flusher http.Flusher
w http.ResponseWriter
req *http.Request
logf func(string, ...any)
heartbeat time.Duration
}
func New(opts Options) *Server {
if opts.ClientBuf <= 0 {
opts.ClientBuf = 16
}
if opts.Heartbeat <= 0 {
opts.Heartbeat = 25 * time.Second
}
if opts.Addr == "" {
opts.Addr = ":8080"
}
if opts.StreamPath == "" {
opts.StreamPath = "/stream"
}
if opts.IndexPath == "" {
opts.IndexPath = "/"
}
s := &Server{
opts: opts,
mux: http.NewServeMux(),
clients: make(map[*client]struct{}),
}
s.routes()
s.http = &http.Server{
Addr: opts.Addr,
Handler: s.mux,
ReadHeaderTimeout: 10 * time.Second,
}
return s
}
func (s *Server) routes() {
if s.opts.IndexPath != "" {
s.mux.HandleFunc(s.opts.IndexPath, s.handleIndex)
}
s.mux.HandleFunc(s.opts.StreamPath, s.handleStream)
}
func (s *Server) logf(format string, args ...any) {
if s.opts.Logger != nil {
s.opts.Logger.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// ListenAndServe starts the HTTP server (blocking).
func (s *Server) ListenAndServe() error {
s.logf("sse: listening on http://%s (index=%s, stream=%s)", s.http.Addr, s.opts.IndexPath, s.opts.StreamPath)
return s.http.ListenAndServe()
}
// Shutdown gracefully stops the server.
func (s *Server) Shutdown(ctx context.Context) error {
s.clientsMu.Lock()
for c := range s.clients {
close(c.closeCh)
}
s.clientsMu.Unlock()
return s.http.Shutdown(ctx)
}
// Publish broadcasts a new payload to all clients and stores it as latest.
func (s *Server) Publish(payload string) {
// Store latest
s.latestMu.Lock()
s.latest = payload
s.latestMu.Unlock()
// Broadcast
s.clientsMu.RLock()
defer s.clientsMu.RUnlock()
for c := range s.clients {
select {
case c.ch <- payload:
default:
// Drop if client is slow (buffer full)
if s.opts.Logger != nil {
s.opts.Logger.Printf("sse: dropping message to slow client %p", c)
}
}
}
}
func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
page := indexTemplate(s.opts.Title, s.opts.StreamPath)
_, _ = w.Write([]byte(page))
}
func (s *Server) handleStream(w http.ResponseWriter, r *http.Request) {
// Required SSE headers
if s.opts.AllowCORS {
w.Header().Set("Access-Control-Allow-Origin", "*")
}
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
flusher, ok := w.(http.Flusher)
if !ok {
http.Error(w, "streaming unsupported", http.StatusInternalServerError)
return
}
c := &client{
ch: make(chan string, s.opts.ClientBuf),
closeCh: make(chan struct{}),
flusher: flusher,
w: w,
req: r,
logf: s.logf,
heartbeat: s.opts.Heartbeat,
}
// Register client
s.clientsMu.Lock()
s.clients[c] = struct{}{}
s.clientsMu.Unlock()
// Initial comment to open the stream for some proxies
fmt.Fprintf(w, ": connected %s\n\n", time.Now().Format(time.RFC3339))
flusher.Flush()
// Send latest if any
s.latestMu.RLock()
latest := s.latest
s.latestMu.RUnlock()
if latest != "" {
writeSSE(w, latest)
flusher.Flush()
}
// Start pump
go c.pump()
// Block until client disconnects
<-r.Context().Done()
// Unregister client
close(c.closeCh)
s.clientsMu.Lock()
delete(s.clients, c)
s.clientsMu.Unlock()
}
func (c *client) pump() {
t := time.NewTicker(c.heartbeat)
defer t.Stop()
for {
select {
case <-c.closeCh:
return
case msg := <-c.ch:
writeSSE(c.w, msg)
c.flusher.Flush()
case <-t.C:
// heartbeat comment (keeps connections alive through proxies)
fmt.Fprint(c.w, ": hb\n\n")
c.flusher.Flush()
}
}
}
func writeSSE(w http.ResponseWriter, msg string) {
// Split on lines; each needs its own "data:" field per the SSE spec
lines := strings.Split(strings.TrimRight(msg, "\n"), "\n")
for _, ln := range lines {
fmt.Fprintf(w, "data: %s\n", ln)
}
fmt.Fprint(w, "\n")
}
// Minimal index page with live updates
func indexTemplate(title, streamPath string) string {
if title == "" {
title = "SSE Stream"
}
if streamPath == "" {
streamPath = "/stream"
}
const tpl = `<!doctype html>
<html>
<head>
<meta charset="utf-8" />
<title>{{.Title}}</title>
<style>
body { font-family: system-ui, sans-serif; margin: 2rem; }
pre { background:#111; color:#eee; padding:1rem; border-radius:12px; white-space:pre-wrap;}
.status { margin-bottom: 1rem; }
</style>
</head>
<body>
<h1>{{.Title}}</h1>
<div class="status">Connecting…</div>
<pre id="out"></pre>
<script>
const statusEl = document.querySelector('.status');
const out = document.getElementById('out');
const es = new EventSource('{{.Stream}}');
es.onmessage = (e) => {
// The server sends the full snapshot in a single event, so replace the
// displayed content with the latest payload.
if (e.data === "") return;
out.textContent = e.data + "\n";
statusEl.textContent = "Connected";
};
es.addEventListener('open', () => { statusEl.textContent = "Connected"; });
es.addEventListener('error', () => { statusEl.textContent = "Disconnected (browser will retry)…"; });
</script>
</body>
</html>`
page, _ := template.New("idx").Parse(tpl)
var b strings.Builder
_ = page.Execute(&b, map[string]any{
"Title": title,
"Stream": streamPath,
})
return b.String()
}
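A minimal usage sketch for the package above, assuming it is importable at the path shown; it serves the index page and pushes a fresh snapshot to all connected clients every two seconds:

```go
package main

import (
	"log"
	"time"

	"github.com/cozystack/cozystack/pkg/sse" // assumed import path
)

func main() {
	srv := sse.New(sse.Options{
		Addr:       ":8080",
		IndexPath:  "/",
		StreamPath: "/stream",
		Title:      "Cluster status",
		AllowCORS:  true,
	})
	go func() {
		for t := range time.Tick(2 * time.Second) {
			// Each publish replaces the stored "latest" payload and is
			// broadcast to every connected client.
			srv.Publish("snapshot generated at " + t.Format(time.RFC3339))
		}
	}()
	log.Fatal(srv.ListenAndServe())
}
```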

View File

@@ -4,5 +4,6 @@
cd packages/core/installer
make image-cozystack REGISTRY=YOUR_CUSTOM_REGISTRY
make apply
kubectl delete pod dashboard-redis-master-0 -n cozy-dashboard
kubectl delete po -l app=source-controller -n cozy-fluxcd
```

View File

@@ -1,5 +1,4 @@
include ../../../scripts/package.mk
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
yq -o json -i '.properties = {}' values.schema.json
readme-generator -v values.yaml -s values.schema.json -r README.md

View File

@@ -1,5 +1,5 @@
{
"title": "Chart Values",
"type": "object",
"properties": {}
}
"title": "Chart Values",
"type": "object",
"properties": {}
}

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.13.0
version: 0.11.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -4,15 +4,21 @@ include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
image:
docker buildx build images/clickhouse-backup \
--provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/clickhouse-backup:latest \
--cache-to type=inline \
--metadata-file images/clickhouse-backup.json \
$(BUILDX_ARGS)
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG))@$$(yq e '."containerimage.digest"' images/clickhouse-backup.json -o json -r)" \
> images/clickhouse-backup.tag
rm -f images/clickhouse-backup.json

View File

@@ -23,54 +23,35 @@ For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.
### Common parameters
| Name | Description | Type | Value |
| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------- |
| `replicas` | Number of Clickhouse replicas | `int` | `2` |
| `shards` | Number of Clickhouse shards | `int` | `1` |
| `resources` | Explicit CPU and memory configuration for each Clickhouse replica. When left empty, the preset defined in `resourcesPreset` is applied. | `*object` | `null` |
| `resources.cpu` | CPU available to each replica | `*quantity` | `null` |
| `resources.memory` | Memory (RAM) available to each replica | `*quantity` | `null` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`. | `string` | `small` |
| `size` | Persistent Volume Claim size, available for application data | `quantity` | `10Gi` |
| `storageClass` | StorageClass used to store the data | `string` | `""` |
| Name | Description | Value |
| ---------------- | -------------------------------------------------------- | ------ |
| `size` | Size of Persistent Volume for data | `10Gi` |
| `logStorageSize` | Size of Persistent Volume for logs | `2Gi` |
| `shards` | Number of Clickhouse shards | `1` |
| `replicas` | Number of Clickhouse replicas | `2` |
| `storageClass` | StorageClass used to store the data | `""` |
| `logTTL` | TTL (expiration time) for query_log and query_thread_log | `15` |
### Configuration parameters
### Application-specific parameters
| Name | Description | Type | Value |
| ---------------------- | ------------------------------------------------------------ | ------------------- | ------- |
| `logStorageSize` | Size of Persistent Volume for logs | `quantity` | `2Gi` |
| `logTTL` | TTL (expiration time) for `query_log` and `query_thread_log` | `int` | `15` |
| `users` | Users configuration | `map[string]object` | `{...}` |
| `users[name].password` | Password for the user | `*string` | `null` |
| `users[name].readonly` | User is `readonly`, default is `false`. | `*bool` | `null` |
| Name | Description | Value |
| ------- | ------------------- | ----- |
| `users` | Users configuration | `{}` |
### Backup parameters
| Name | Description | Type | Value |
| ------------------------ | ---------------------------------------------- | -------- | ------------------------------------------------------ |
| `backup` | Backup configuration | `object` | `{}` |
| `backup.enabled` | Enable regular backups, default is `false` | `bool` | `false` |
| `backup.s3Region` | AWS S3 region where backups are stored | `string` | `us-east-1` |
| `backup.s3Bucket` | S3 bucket used for storing backups | `string` | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `string` | `0 2 * * *` |
| `backup.cleanupStrategy` | Retention strategy for cleaning up old backups | `string` | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | Access key for S3, used for authentication | `string` | `<your-access-key>` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `string` | `<your-secret-key>` |
| `backup.resticPassword` | Password for Restic backup encryption | `string` | `<password>` |
### Clickhouse Keeper parameters
| Name | Description | Type | Value |
| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------- |
| `clickhouseKeeper` | Clickhouse Keeper configuration | `*object` | `null` |
| `clickhouseKeeper.enabled` | Deploy ClickHouse Keeper for cluster coordination | `*bool` | `true` |
| `clickhouseKeeper.size` | Persistent Volume Claim size, available for application data | `*quantity` | `1Gi` |
| `clickhouseKeeper.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`. | `string` | `micro` |
| `clickhouseKeeper.replicas` | Number of Keeper replicas | `*int` | `3` |
| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | Retention strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | Password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
## Parameter examples and reference
@@ -94,6 +75,6 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.13.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/cozystack/cozystack/clickhouse-backup:0.11.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205

View File

@@ -1,96 +0,0 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
{{- if .Values.clickhouseKeeper.enabled }}
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
name: "{{ .Release.Name }}-keeper"
annotations:
prometheus.io/port: "7000"
prometheus.io/scrape: "true"
spec:
namespaceDomainPattern: "%s.svc.{{ $clusterDomain }}"
configuration:
clusters:
- name: "cluster1"
layout:
replicasCount: {{ .Values.clickhouseKeeper.replicas }}
settings:
logger/level: "trace"
logger/console: "true"
listen_host: "0.0.0.0"
keeper_server/four_letter_word_white_list: "*"
keeper_server/coordination_settings/raft_logs_level: "information"
prometheus/endpoint: "/metrics"
prometheus/port: "7000"
prometheus/metrics: "true"
prometheus/events: "true"
prometheus/asynchronous_metrics: "true"
prometheus/status_info: "false"
defaults:
templates:
# Templates are specified as default for all clusters
podTemplate: default
dataVolumeClaimTemplate: default
templates:
podTemplates:
- name: default
metadata:
labels:
app: "{{ .Release.Name }}-keeper"
annotations:
prometheus.io/port: "7000"
prometheus.io/scrape: "true"
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- "{{ .Release.Name }}-keeper"
topologyKey: "kubernetes.io/hostname"
containers:
- name: clickhouse-keeper
imagePullPolicy: IfNotPresent
image: clickhouse/clickhouse-keeper:24.9.2.42
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.clickhouseKeeper.resourcesPreset .Values.resources $) | nindent 20 }}
securityContext:
fsGroup: 101
volumeClaimTemplates:
- name: default
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "{{ .Values.clickhouseKeeper.size }}"
---
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
name: {{ .Release.Name }}-keeper
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app: {{ .Release.Name }}-keeper
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- port: metrics
path: /metrics
interval: 30s
scheme: http
relabelConfigs:
- action: replace
sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: instance
{{- end }}

View File

@@ -91,18 +91,6 @@ spec:
layout:
shardsCount: {{ .Values.shards }}
replicasCount: {{ .Values.replicas }}
{{- if .Values.clickhouseKeeper.enabled }}
zookeeper:
nodes:
{{- $replicas := int .Values.clickhouseKeeper.replicas }}
{{- $release := .Release.Name }}
{{- $namespace := .Release.Namespace }}
{{- $clusterDomain := .Values.clusterDomain }}
{{- range $i := until $replicas }}
- host: "chk-{{ $release }}-keeper-cluster1-0-{{ $i }}.{{ $namespace }}.svc.{{ $clusterDomain }}"
port: 2181
{{- end }}
{{- end }}
templates:
volumeClaimTemplates:
- name: data-volume-template

View File

@@ -23,9 +23,6 @@ rules:
- workloadmonitors
resourceNames:
- {{ .Release.Name }}
{{- if .Values.clickhouseKeeper.enabled }}
- {{ .Release.Name }}-keeper
{{- end }}
verbs: ["get", "list", "watch"]
---
kind: RoleBinding

View File

@@ -11,18 +11,3 @@ spec:
selector:
app.kubernetes.io/instance: {{ $.Release.Name }}
version: {{ $.Chart.Version }}
{{- if .Values.clickhouseKeeper.enabled }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}-keeper
spec:
replicas: {{ .Values.clickhouseKeeper.replicas }}
minReplicas: 1
kind: clickhouse
type: clickhouse
selector:
app: {{ $.Release.Name }}-keeper
version: {{ $.Chart.Version }}
{{- end }}

View File

@@ -1,223 +1,101 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"backup": {
"description": "Backup configuration",
"type": "object",
"default": {},
"required": [
"cleanupStrategy",
"enabled",
"resticPassword",
"s3AccessKey",
"s3Bucket",
"s3Region",
"s3SecretKey",
"schedule"
],
"properties": {
"cleanupStrategy": {
"description": "Retention strategy for cleaning up old backups",
"type": "string",
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
"title": "Chart Values",
"type": "object",
"properties": {
"size": {
"type": "string",
"description": "Size of Persistent Volume for data",
"default": "10Gi"
},
"enabled": {
"description": "Enable regular backups, default is `false`",
"type": "boolean",
"default": false
"logStorageSize": {
"type": "string",
"description": "Size of Persistent Volume for logs",
"default": "2Gi"
},
"resticPassword": {
"description": "Password for Restic backup encryption",
"type": "string",
"default": "\u003cpassword\u003e"
},
"s3AccessKey": {
"description": "Access key for S3, used for authentication",
"type": "string",
"default": "\u003cyour-access-key\u003e"
},
"s3Bucket": {
"description": "S3 bucket used for storing backups",
"type": "string",
"default": "s3.example.org/clickhouse-backups"
},
"s3Region": {
"description": "AWS S3 region where backups are stored",
"type": "string",
"default": "us-east-1"
},
"s3SecretKey": {
"description": "Secret key for S3, used for authentication",
"type": "string",
"default": "\u003cyour-secret-key\u003e"
},
"schedule": {
"description": "Cron schedule for automated backups",
"type": "string",
"default": "0 2 * * *"
}
}
},
"clickhouseKeeper": {
"description": "Clickhouse Keeper configuration",
"type": "object",
"default": {},
"required": [
"resourcesPreset"
],
"properties": {
"enabled": {
"description": "Deploy ClickHouse Keeper for cluster coordination",
"type": "boolean",
"default": true
"shards": {
"type": "number",
"description": "Number of Clickhouse shards",
"default": 1
},
"replicas": {
"description": "Number of Keeper replicas",
"type": "integer",
"default": 3
"type": "number",
"description": "Number of Clickhouse replicas",
"default": 2
},
"storageClass": {
"type": "string",
"description": "StorageClass used to store the data",
"default": ""
},
"logTTL": {
"type": "number",
"description": "TTL (expiration time) for query_log and query_thread_log",
"default": 15
},
"backup": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable periodic backups",
"default": false
},
"s3Region": {
"type": "string",
"description": "AWS S3 region where backups are stored",
"default": "us-east-1"
},
"s3Bucket": {
"type": "string",
"description": "S3 bucket used for storing backups",
"default": "s3.example.org/clickhouse-backups"
},
"schedule": {
"type": "string",
"description": "Cron schedule for automated backups",
"default": "0 2 * * *"
},
"cleanupStrategy": {
"type": "string",
"description": "Retention strategy for cleaning up old backups",
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
},
"s3AccessKey": {
"type": "string",
"description": "Access key for S3, used for authentication",
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
},
"s3SecretKey": {
"type": "string",
"description": "Secret key for S3, used for authentication",
"default": "ju3eum4dekeich9ahM1te8waeGai0oog"
},
"resticPassword": {
"type": "string",
"description": "Password for Restic backup encryption",
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
},
"resources": {
"type": "object",
"description": "Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"description": "Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.",
"type": "string",
"default": "micro",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"size": {
"description": "Persistent Volume Claim size, available for application data",
"default": "1Gi",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
"type": "string",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "small",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
"logStorageSize": {
"description": "Size of Persistent Volume for logs",
"default": "2Gi",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"logTTL": {
"description": "TTL (expiration time) for `query_log` and `query_thread_log`",
"type": "integer",
"default": 15
},
"replicas": {
"description": "Number of Clickhouse replicas",
"type": "integer",
"default": 2
},
"resources": {
"description": "Explicit CPU and memory configuration for each Clickhouse replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object",
"default": {},
"properties": {
"cpu": {
"description": "CPU available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"memory": {
"description": "Memory (RAM) available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
}
}
},
"resourcesPreset": {
"description": "Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.",
"type": "string",
"default": "small",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"shards": {
"description": "Number of Clickhouse shards",
"type": "integer",
"default": 1
},
"size": {
"description": "Persistent Volume Claim size, available for application data",
"default": "10Gi",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"storageClass": {
"description": "StorageClass used to store the data",
"type": "string"
},
"users": {
"description": "Users configuration",
"type": "object",
"default": {},
"additionalProperties": {
"type": "object",
"properties": {
"password": {
"description": "Password for the user",
"type": "string"
},
"readonly": {
"description": "User is `readonly`, default is `false`.",
"type": "boolean"
}
}
}
}
}
}
}

View File

@@ -1,36 +1,22 @@
## @section Common parameters
## @param size Size of Persistent Volume for data
## @param logStorageSize Size of Persistent Volume for logs
## @param shards Number of Clickhouse shards
## @param replicas Number of Clickhouse replicas
## @param storageClass StorageClass used to store the data
## @param logTTL TTL (expiration time) for query_log and query_thread_log
##
## @param replicas {int} Number of Clickhouse replicas
replicas: 2
## @param shards {int} Number of Clickhouse shards
shards: 1
## @param resources {*resources} Explicit CPU and memory configuration for each Clickhouse replica. When left empty, the preset defined in `resourcesPreset` is applied.
## @field resources.cpu {*quantity} CPU available to each replica
## @field resources.memory {*quantity} Memory (RAM) available to each replica
# resources:
# cpu: 4000m
# memory: 4Gi
resources: {}
## @param resourcesPreset {string enum:"nano,micro,small,medium,large,xlarge,2xlarge"} Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
resourcesPreset: "small"
## @param size {quantity} Persistent Volume Claim size, available for application data
size: 10Gi
## @param storageClass {string} StorageClass used to store the data
storageClass: ""
## @section Application-specific parameters
##
## @param logStorageSize {quantity} Size of Persistent Volume for logs
logStorageSize: 2Gi
## @param logTTL {int} TTL (expiration time) for `query_log` and `query_thread_log`
shards: 1
replicas: 2
storageClass: ""
logTTL: 15
## @param users {map[string]user} Users configuration
## @field user.password {*string} Password for the user
## @field user.readonly {*bool} User is `readonly`, default is `false`.
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
## user1:
@@ -41,37 +27,31 @@ logTTL: 15
##
users: {}
## @section Backup parameters
## @param backup {backup} Backup configuration
## @field backup.enabled {bool} Enable regular backups, default is `false`
## @field backup.s3Region {string} AWS S3 region where backups are stored
## @field backup.s3Bucket {string} S3 bucket used for storing backups
## @field backup.schedule {string} Cron schedule for automated backups
## @field backup.cleanupStrategy {string} Retention strategy for cleaning up old backups
## @field backup.s3AccessKey {string} Access key for S3, used for authentication
## @field backup.s3SecretKey {string} Secret key for S3, used for authentication
## @field backup.resticPassword {string} Password for Restic backup encryption
## @param backup.enabled Enable periodic backups
## @param backup.s3Region AWS S3 region where backups are stored
## @param backup.s3Bucket S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.cleanupStrategy Retention strategy for cleaning up old backups
## @param backup.s3AccessKey Access key for S3, used for authentication
## @param backup.s3SecretKey Secret key for S3, used for authentication
## @param backup.resticPassword Password for Restic backup encryption
backup:
enabled: false
s3Region: us-east-1
s3Bucket: "s3.example.org/clickhouse-backups"
s3Bucket: s3.example.org/clickhouse-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: "<your-access-key>"
s3SecretKey: "<your-secret-key>"
resticPassword: "<password>"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
## @param resources Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @section Clickhouse Keeper parameters
## @param clickhouseKeeper {*clickhouseKeeper} Clickhouse Keeper configuration
## @field clickhouseKeeper.enabled {*bool} Deploy ClickHouse Keeper for cluster coordination
## @field clickhouseKeeper.size {*quantity} Persistent Volume Claim size, available for application data
## @field clickhouseKeeper.resourcesPreset {string enum:"nano,micro,small,medium,large,xlarge,2xlarge"} Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
## @field clickhouseKeeper.replicas {*int} Number of Keeper replicas
clickhouseKeeper:
enabled: true
size: 1Gi
resourcesPreset: micro
replicas: 3
## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "small"

View File

@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0
version: 0.8.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 2.4.0
appVersion: "1.24.0"

View File

@@ -1,11 +1,5 @@
include ../../../scripts/package.mk
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/FerretDB/FerretDB | awk -F'[/^]' '{sub("^v", "", $$3)} END{print $$3}') && \
pgtag=$$(skopeo list-tags docker://ghcr.io/ferretdb/postgres-documentdb | jq -r --arg tag "$$tag" '.Tags[] | select(endswith("ferretdb-" + $$tag))' | sort -V | tail -n1) && \
sed -i "s|\(imageName: ghcr.io/ferretdb/postgres-documentdb:\).*|\1$$pgtag|" templates/postgres.yaml && \
sed -i "s|\(image: ghcr.io/ferretdb/ferretdb:\).*|\1$$tag|" templates/ferretdb.yaml && \
sed -i "s|\(appVersion: \).*|\1$$tag|" Chart.yaml
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

View File

@@ -8,52 +8,37 @@ Internally, FerretDB service is backed by Postgres.
### Common parameters
| Name | Description | Type | Value |
| ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------- |
| `replicas` | Number of replicas | `int` | `2` |
| `resources` | Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `*object` | `null` |
| `resources.cpu` | CPU available to each replica | `*quantity` | `null` |
| `resources.memory` | Memory (RAM) available to each replica | `*quantity` | `null` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`. | `string` | `micro` |
| `size` | Persistent Volume Claim size, available for application data | `quantity` | `10Gi` |
| `storageClass` | StorageClass used to store the data | `string` | `""` |
| `external` | Enable external access from outside the cluster | `bool` | `false` |
| Name | Description | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `replicas` | Number of replicas | `2` |
| `storageClass` | StorageClass used to store the data | `""` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas) | `0` |
### Configuration parameters
### Application-specific parameters
| Name | Description | Type | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------- | ------------------- | ------- |
| `quorum` | Configuration for the quorum-based synchronous replication | `object` | `{}` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed | `int` | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas) | `int` | `0` |
| `users` | Users configuration | `map[string]object` | `{...}` |
| `users[name].password` | Password for the user | `*string` | `null` |
| Name | Description | Value |
| ------- | ------------------- | ----- |
| `users` | Users configuration | `{}` |
### Backup parameters
| Name | Description | Type | Value |
| ------------------------ | ---------------------------------------------------------- | -------- | ----------------------------------- |
| `backup` | Backup configuration | `object` | `{}` |
| `backup.enabled` | Enable regular backups, default is `false`. | `bool` | `false` |
| `backup.schedule` | Cron schedule for automated backups | `string` | `0 2 * * * *` |
| `backup.retentionPolicy` | Retention policy | `string` | `30d` |
| `backup.endpointURL` | S3 Endpoint used to upload data to the cloud | `string` | `http://minio-gateway-service:9000` |
| `backup.destinationPath` | Path to store the backup (e.g. s3://bucket/path/to/folder) | `string` | `s3://bucket/path/to/folder/` |
| `backup.s3AccessKey` | Access key for S3, used for authentication | `string` | `<your-access-key>` |
| `backup.s3SecretKey` | Secret key for S3, used for authentication | `string` | `<your-secret-key>` |
| Name | Description | Value |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
### Bootstrap (recovery) parameters
| Name | Description | Type | Value |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------- | --------- | ------- |
| `bootstrap` | Bootstrap (recovery) configuration | `object` | `{}` |
| `bootstrap.enabled` | Restore database cluster from a backup | `*bool` | `false` |
| `bootstrap.recoveryTime` | Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, restores to the latest available point. | `*string` | `""` |
| `bootstrap.oldName` | Name of the database cluster before deletion | `*string` | `""` |
## Parameter examples and reference
@@ -77,6 +62,6 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

View File

@@ -0,0 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.14.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

View File

@@ -0,0 +1,99 @@
{{- if .Values.backup.enabled }}
{{ $image := .Files.Get "images/backup.json" | fromJson }}
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ .Release.Name }}-backup
spec:
schedule: "{{ .Values.backup.schedule }}"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 2
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: pgdump
image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
command:
- /bin/sh
- /scripts/backup.sh
env:
- name: REPO_PREFIX
value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
- name: CLEANUP_STRATEGY
value: {{ required "cleanupStrategy is not specified!" .Values.backup.cleanupStrategy | quote }}
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3AccessKey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3SecretKey
- name: AWS_DEFAULT_REGION
value: {{ .Values.backup.s3Region }}
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: resticPassword
volumeMounts:
- mountPath: /scripts
name: scripts
- mountPath: /tmp
name: tmp
- mountPath: /.cache
name: cache
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumes:
- name: scripts
secret:
secretName: {{ .Release.Name }}-backup-script
- name: tmp
emptyDir: {}
- name: cache
emptyDir: {}
securityContext:
runAsNonRoot: true
runAsUser: 9000
runAsGroup: 9000
seccompProfile:
type: RuntimeDefault
{{- end }}
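
For a quick smoke test, the CronJob above can be triggered immediately instead of waiting for the schedule. A minimal sketch, assuming a release named `mydb`:

# spawn a one-off Job from the CronJob and follow its output
kubectl create job --from=cronjob/mydb-backup mydb-backup-manual
kubectl logs -f job/mydb-backup-manual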

View File

@@ -0,0 +1,50 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup-script
stringData:
backup.sh: |
#!/bin/sh
set -e
set -o pipefail
JOB_ID="job-$(uuidgen|cut -f1 -d-)"
DB_LIST=$(psql -Atq -c 'SELECT datname FROM pg_catalog.pg_database;' | grep -v '^\(postgres\|app\|template.*\)$')
DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle list
echo "Job ID: $JOB_ID"
echo "Target repo: $REPO_PREFIX"
echo "Cleanup strategy: $CLEANUP_STRATEGY"
echo "Start backup for:"
echo "$DB_LIST"
echo
echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
pg_dump -Z0 -Ft -d "$db" | \
restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.tar
restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
)
done
echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
echo
echo "Run cleanup:"
echo
echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
restic prune -r "s3:${REPO_PREFIX}/$db"
)
done
echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
{{- end }}
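
Because each database lands in its own restic repository as a single `dump.tar` produced by `pg_dump -Ft`, an individual database can be restored with stock tooling. A hedged sketch, assuming the same `AWS_*` credentials and `RESTIC_PASSWORD` are exported and the chart's default bucket is in use:

export RESTIC_REPOSITORY="s3:s3.example.org/postgres-backups/mydb"
restic snapshots                          # list available snapshots, or just use "latest"
restic dump latest dump.tar > dump.tar    # stream the tar-format dump back out of restic
pg_restore --clean --if-exists -d mydb dump.tar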

View File

@@ -0,0 +1,11 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup
stringData:
s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey }}
s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey }}
resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword }}
{{- end }}

View File

@@ -1,12 +0,0 @@
{{- if .Values.backup.enabled }}
---
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: {{ .Release.Name }}-postgres
spec:
schedule: {{ .Values.backup.schedule | quote }}
backupOwnerReference: self
cluster:
name: {{ .Release.Name }}-postgres
{{- end }}

View File

@@ -8,9 +8,7 @@ spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
{{- if (include "cozy-lib.network.disableLoadBalancerNodePorts" $ | fromYaml) }}
allocateLoadBalancerNodePorts: false
{{- end }}
{{- end }}
ports:
- name: ferretdb

View File

@@ -16,14 +16,12 @@ spec:
spec:
containers:
- name: ferretdb
image: ghcr.io/ferretdb/ferretdb:2.4.0
image: ghcr.io/ferretdb/ferretdb:1.24.0
ports:
- containerPort: 27017
env:
- name: POSTGRESQL_PASSWORD
- name: FERRETDB_POSTGRESQL_URL
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: FERRETDB_POSTGRESQL_URL
value: "postgresql://postgres:$(POSTGRESQL_PASSWORD)@{{ .Release.Name }}-postgres-rw:5432/postgres"
name: {{ .Release.Name }}-postgres-app
key: uri
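
Either way the credentials live in PostgreSQL, so a MongoDB client authenticates with PostgreSQL users. A rough sketch, with the Service and Secret names assumed from the templates above; note that FerretDB 1.x expects the PLAIN mechanism, while 2.x speaks SCRAM:

# fetch the superuser password and connect (release name "mydb" assumed)
PASS=$(kubectl get secret mydb-postgres-superuser -o jsonpath='{.data.password}' | base64 -d)
mongosh "mongodb://postgres:${PASS}@mydb-ferretdb:27017/postgres?authMechanism=PLAIN"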

View File

@@ -0,0 +1,66 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-init-job
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
metadata:
name: {{ .Release.Name }}-init-job
annotations:
checksum/config: {{ include (print $.Template.BasePath "/init-script.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: postgres
image: ghcr.io/cloudnative-pg/postgresql:15.3
command:
- bash
- /scripts/init.sh
env:
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/secret
name: secret
- mountPath: /scripts
name: scripts
securityContext:
fsGroup: 26
runAsGroup: 26
runAsNonRoot: true
runAsUser: 26
seccompProfile:
type: RuntimeDefault
volumes:
- name: secret
secret:
secretName: {{ .Release.Name }}-postgres-superuser
- name: scripts
secret:
secretName: {{ .Release.Name }}-init-script

View File

@@ -0,0 +1,131 @@
{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
{{- $passwords := dict }}
{{- with (index $existingSecret "data") }}
{{- range $k, $v := . }}
{{- $_ := set $passwords $k (b64dec $v) }}
{{- end }}
{{- end }}
{{- range $user, $u := .Values.users }}
{{- if $u.password }}
{{- $_ := set $passwords $user $u.password }}
{{- else if not (index $passwords $user) }}
{{- $_ := set $passwords $user (randAlphaNum 16) }}
{{- end }}
{{- end }}
{{- if .Values.users }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-credentials
stringData:
{{- range $user, $u := .Values.users }}
{{ quote $user }}: {{ quote (index $passwords $user) }}
{{- end }}
{{- end }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-init-script
stringData:
init.sh: |
#!/bin/bash
set -e
until pg_isready ; do sleep 5; done
echo "== create users"
{{- if .Values.users }}
psql -v ON_ERROR_STOP=1 <<\EOT
{{- range $user, $u := .Values.users }}
SELECT 'CREATE ROLE {{ $user }} LOGIN INHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $user }}')\gexec
ALTER ROLE {{ $user }} WITH PASSWORD '{{ index $passwords $user }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
COMMENT ON ROLE {{ $user }} IS 'user managed by helm';
{{- end }}
EOT
{{- end }}
echo "== delete users"
MANAGED_USERS=$(echo '\du+' | psql | awk -F'|' '$4 == " user managed by helm" {print $1}' | awk NF=NF RS= OFS=' ')
DEFINED_USERS="{{ join " " (keys .Values.users) }}"
DELETE_USERS=$(for user in $MANAGED_USERS; do case " $DEFINED_USERS " in *" $user "*) :;; *) echo $user;; esac; done)
echo "users to delete: $DELETE_USERS"
for user in $DELETE_USERS; do
# https://stackoverflow.com/a/51257346/2931267
psql -v ON_ERROR_STOP=1 --echo-all <<EOT
REASSIGN OWNED BY $user TO postgres;
DROP OWNED BY $user;
DROP USER $user;
EOT
done
echo "== create roles"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
SELECT 'CREATE ROLE app_admin NOINHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'app_admin')\gexec
COMMENT ON ROLE app_admin IS 'role managed by helm';
EOT
echo "== grant privileges on databases to roles"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
ALTER DATABASE app OWNER TO app_admin;
DO $$
DECLARE
schema_record record;
BEGIN
-- Loop over all schemas
FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
-- Changing Schema Ownership
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, 'app_admin');
-- Add rights for the admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL SEQUENCES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL FUNCTIONS IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', schema_record.schema_name, 'app_admin');
END LOOP;
END$$;
EOT
echo "== setup event trigger for schema creation"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
CREATE OR REPLACE FUNCTION auto_grant_schema_privileges()
RETURNS event_trigger LANGUAGE plpgsql AS $$
DECLARE
obj record;
BEGIN
FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE command_tag = 'CREATE SCHEMA' LOOP
-- Set owner for schema
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', obj.object_identity, 'app_admin');
-- Set privileges for admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', obj.object_identity, 'app_admin');
END LOOP;
END;
$$;
DROP EVENT TRIGGER IF EXISTS trigger_auto_grant;
CREATE EVENT TRIGGER trigger_auto_grant ON ddl_command_end
WHEN TAG IN ('CREATE SCHEMA')
EXECUTE PROCEDURE auto_grant_schema_privileges();
EOT
echo "== assign roles to users"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
GRANT app_admin TO app;
{{- range $user, $u := $.Values.users }}
GRANT app_admin TO {{ $user }};
{{- end }}
EOT
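
The event trigger is easy to verify: any schema created in the `app` database afterwards should come out owned by `app_admin`. A minimal check, run with the same superuser environment the init job uses:

psql -d app -c 'CREATE SCHEMA analytics;'
psql -d app -c '\dn+ analytics'   # the Owner column should read app_admin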

View File

@@ -5,50 +5,6 @@ metadata:
name: {{ .Release.Name }}-postgres
spec:
instances: {{ .Values.replicas }}
{{- if .Values.backup.enabled }}
backup:
barmanObjectStore:
destinationPath: {{ .Values.backup.destinationPath }}
endpointURL: {{ .Values.backup.endpointURL }}
s3Credentials:
accessKeyId:
name: {{ .Release.Name }}-s3-creds
key: AWS_ACCESS_KEY_ID
secretAccessKey:
name: {{ .Release.Name }}-s3-creds
key: AWS_SECRET_ACCESS_KEY
retentionPolicy: {{ .Values.backup.retentionPolicy }}
{{- end }}
bootstrap:
initdb:
postInitSQL:
- 'CREATE EXTENSION IF NOT EXISTS documentdb CASCADE;'
{{- if .Values.bootstrap.enabled }}
recovery:
source: {{ .Values.bootstrap.oldName }}
{{- if .Values.bootstrap.recoveryTime }}
recoveryTarget:
targetTime: {{ .Values.bootstrap.recoveryTime }}
{{- end }}
{{- end }}
{{- if .Values.bootstrap.enabled }}
externalClusters:
- name: {{ .Values.bootstrap.oldName }}
barmanObjectStore:
destinationPath: {{ .Values.backup.destinationPath }}
endpointURL: {{ .Values.backup.endpointURL }}
s3Credentials:
accessKeyId:
name: {{ .Release.Name }}-s3-creds
key: AWS_ACCESS_KEY_ID
secretAccessKey:
name: {{ .Release.Name }}-s3-creds
key: AWS_SECRET_ACCESS_KEY
{{- end }}
imageName: ghcr.io/ferretdb/postgres-documentdb:17-0.105.0-ferretdb-2.4.0
postgresUID: 999
postgresGID: 999
enableSuperuserAccess: true
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
{{- if $configMap }}
@@ -66,17 +22,6 @@ spec:
monitoring:
enablePodMonitor: true
postgresql:
shared_preload_libraries:
- pg_cron
- pg_documentdb_core
- pg_documentdb
parameters:
cron.database_name: 'postgres'
pg_hba:
- host postgres postgres 127.0.0.1/32 trust
- host postgres postgres ::1/128 trust
storage:
size: {{ required ".Values.size is required" .Values.size }}
{{- with .Values.storageClass }}
@@ -97,6 +42,8 @@ spec:
passwordSecret:
name: {{ printf "%s-user-%s" $.Release.Name $user }}
login: true
inRoles:
- app
{{- end }}
{{- end }}
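
On the side of this diff that still carries the bootstrap block, a point-in-time recovery is driven purely by values; the `backup` section must keep pointing at the old cluster's object store, since `externalClusters` reads the same `destinationPath` and `endpointURL`. A sketch of such an override (names and timestamp are placeholders, reusing the example format from values.yaml):

cat > recover-values.yaml <<'EOF'
bootstrap:
  enabled: true
  oldName: mydb-old
  recoveryTime: "2020-11-26 15:22:00.00000+00"
EOF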

View File

@@ -1,187 +1,106 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"backup": {
"description": "Backup configuration",
"type": "object",
"default": {},
"required": [
"destinationPath",
"enabled",
"endpointURL",
"retentionPolicy",
"s3AccessKey",
"s3SecretKey",
"schedule"
],
"properties": {
"destinationPath": {
"description": "Path to store the backup (i.e. s3://bucket/path/to/folder)",
"type": "string",
"default": "s3://bucket/path/to/folder/"
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"description": "Enable external access from outside the cluster",
"default": false
},
"enabled": {
"description": "Enable regular backups, default is `false`.",
"type": "boolean",
"default": false
"size": {
"type": "string",
"description": "Persistent Volume size",
"default": "10Gi"
},
"endpointURL": {
"description": "S3 Endpoint used to upload data to the cloud",
"type": "string",
"default": "http://minio-gateway-service:9000"
"replicas": {
"type": "number",
"description": "Number of replicas",
"default": 2
},
"retentionPolicy": {
"description": "Retention policy",
"type": "string",
"default": "30d"
"storageClass": {
"type": "string",
"description": "StorageClass used to store the data",
"default": ""
},
"s3AccessKey": {
"description": "Access key for S3, used for authentication",
"type": "string",
"default": "\u003cyour-access-key\u003e"
},
"s3SecretKey": {
"description": "Secret key for S3, used for authentication",
"type": "string",
"default": "\u003cyour-secret-key\u003e"
},
"schedule": {
"description": "Cron schedule for automated backups",
"type": "string",
"default": "0 2 * * * *"
}
}
},
"bootstrap": {
"description": "Bootstrap (recovery) configuration",
"type": "object",
"default": {},
"properties": {
"enabled": {
"description": "Restore database cluster from a backup",
"type": "boolean",
"default": false
},
"oldName": {
"description": "Name of the database cluster before deletion",
"type": "string"
},
"recoveryTime": {
"description": "Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, restores to the latest available point.",
"type": "string"
}
}
},
"external": {
"description": "Enable external access from outside the cluster",
"type": "boolean",
"default": false
},
"quorum": {
"description": "Configuration for the quorum-based synchronous replication",
"type": "object",
"default": {},
"required": [
"maxSyncReplicas",
"minSyncReplicas"
],
"properties": {
"maxSyncReplicas": {
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)",
"type": "integer",
"default": 0
},
"minSyncReplicas": {
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed",
"type": "integer",
"default": 0
}
}
},
"replicas": {
"description": "Number of replicas",
"type": "integer",
"default": 2
},
"resources": {
"description": "Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object",
"default": {},
"properties": {
"cpu": {
"description": "CPU available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
"quorum": {
"type": "object",
"properties": {
"minSyncReplicas": {
"type": "number",
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed",
"default": 0
},
"maxSyncReplicas": {
"type": "number",
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)",
"default": 0
}
}
],
"x-kubernetes-int-or-string": true
},
"memory": {
"description": "Memory (RAM) available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
"backup": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable periodic backups",
"default": false
},
"s3Region": {
"type": "string",
"description": "The AWS S3 region where backups are stored",
"default": "us-east-1"
},
"s3Bucket": {
"type": "string",
"description": "The S3 bucket used for storing backups",
"default": "s3.example.org/postgres-backups"
},
"schedule": {
"type": "string",
"description": "Cron schedule for automated backups",
"default": "0 2 * * *"
},
"cleanupStrategy": {
"type": "string",
"description": "The strategy for cleaning up old backups",
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
},
"s3AccessKey": {
"type": "string",
"description": "The access key for S3, used for authentication",
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
},
"s3SecretKey": {
"type": "string",
"description": "The secret key for S3, used for authentication",
"default": "ju3eum4dekeich9ahM1te8waeGai0oog"
},
"resticPassword": {
"type": "string",
"description": "The password for Restic backup encryption",
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
],
"x-kubernetes-int-or-string": true
}
}
},
"resourcesPreset": {
"description": "Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.",
"type": "string",
"default": "micro",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"size": {
"description": "Persistent Volume Claim size, available for application data",
"default": "10Gi",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
"resources": {
"type": "object",
"description": "Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "nano",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
],
"x-kubernetes-int-or-string": true
},
"storageClass": {
"description": "StorageClass used to store the data",
"type": "string"
},
"users": {
"description": "Users configuration",
"type": "object",
"default": {},
"additionalProperties": {
"type": "object",
"properties": {
"password": {
"description": "Password for the user",
"type": "string"
}
}
}
}
}
}
}
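
Helm validates supplied values against values.schema.json on lint, install, and upgrade, so either variant of this schema makes malformed overrides fail fast. For example (chart path assumed):

helm lint ./packages/apps/ferretdb --set replicas=two
# expected to fail schema validation: replicas must be a number, not a string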

View File

@@ -1,35 +1,25 @@
## @section Common parameters
##
## @param replicas {int} Number of replicas
replicas: 2
## @param resources {*resources} Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.
## @field resources.cpu {*quantity} CPU available to each replica
## @field resources.memory {*quantity} Memory (RAM) available to each replica
# resources:
# cpu: 4000m
# memory: 4Gi
resources: {}
## @param resourcesPreset {string enum:"nano,micro,small,medium,large,xlarge,2xlarge"} Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
resourcesPreset: "micro"
## @param size {quantity} Persistent Volume Claim size, available for application data
size: 10Gi
## @param storageClass {string} StorageClass used to store the data
storageClass: ""
## @param external {bool} Enable external access from outside the cluster
## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param replicas Number of replicas
## @param storageClass StorageClass used to store the data
##
external: false
size: 10Gi
replicas: 2
storageClass: ""
## @section Application-specific parameters
##
## @param quorum {quorum} Configuration for the quorum-based synchronous replication
## @field quorum.minSyncReplicas {int} Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed
## @field quorum.maxSyncReplicas {int} Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)
## Configuration for the quorum-based synchronous replication
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas)
quorum:
minSyncReplicas: 0
maxSyncReplicas: 0
## @param users {map[string]user} Users configuration
## @field user.password {*string} Password for the user
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
## user1:
@@ -39,40 +29,31 @@ quorum:
##
users: {}
## @section Backup parameters
##
## @param backup {backup} Backup configuration
## @field backup.enabled {bool} Enable regular backups, default is `false`.
## @field backup.schedule {string} Cron schedule for automated backups
## @field backup.retentionPolicy {string} Retention policy
## @field backup.endpointURL {string} S3 Endpoint used to upload data to the cloud
## @field backup.destinationPath {string} Path to store the backup (i.e. s3://bucket/path/to/folder)
## @field backup.s3AccessKey {string} Access key for S3, used for authentication
## @field backup.s3SecretKey {string} Secret key for S3, used for authentication
## @param backup.enabled Enable periodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.cleanupStrategy The strategy for cleaning up old backups
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
## @param backup.resticPassword The password for Restic backup encryption
backup:
enabled: false
schedule: "0 2 * * * *"
retentionPolicy: 30d
endpointURL: http://minio-gateway-service:9000
destinationPath: s3://bucket/path/to/folder/
s3AccessKey: "<your-access-key>"
s3SecretKey: "<your-secret-key>"
## @section Bootstrap (recovery) parameters
##
## @param bootstrap {bootstrap} Bootstrap (recovery) configuration
## @field bootstrap.enabled {*bool} Restore database cluster from a backup
## @field bootstrap.recoveryTime {*string} Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, restores to the latest available point.
## @field bootstrap.oldName {*string} Name of the database cluster before deletion
##
bootstrap:
enabled: false
# example: 2020-11-26 15:22:00.00000+00
recoveryTime: ""
oldName: ""
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
## @param resources Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"
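
Put together, a minimal override for this chart that defines one managed user and enables quorum replication might look as follows (user name and password are placeholders):

cat > my-values.yaml <<'EOF'
replicas: 2
users:
  user1:
    password: strongpassword
quorum:
  minSyncReplicas: 1
  maxSyncReplicas: 1
EOF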

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -7,17 +7,24 @@ image: image-nginx
image-nginx:
docker buildx build images/nginx-cache \
--provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:latest \
--cache-to type=inline \
--metadata-file images/nginx-cache.json \
$(BUILDX_ARGS)
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))@$$(yq e '."containerimage.digest"' images/nginx-cache.json -o json -r)" \
> images/nginx-cache.tag
rm -f images/nginx-cache.json
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.haproxy.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
yq -i -o json --indent 4 '.properties.nginx.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/chrislim2888/IP2Location-C-Library | awk -F'[/^]' 'END{print $$3}') && \
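
With either generator, the intended loop after touching values.yaml is to regenerate the docs and schema rather than hand-edit them; the `yq` lines then patch the extended preset enum back in:

make generate
git diff --stat README.md values.schema.json   # review the regenerated docs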

View File

@@ -60,43 +60,23 @@ The deployment architecture is illustrated in the diagram below:
### Common parameters
| Name | Description | Type | Value |
| -------------- | ------------------------------------------------------------ | ---------- | ------- |
| `size` | Persistent Volume Claim size, available for application data | `quantity` | `10Gi` |
| `storageClass` | StorageClass used to store the data | `string` | `""` |
| `external` | Enable external access from outside the cluster | `bool` | `false` |
| Name | Description | Value |
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
| `haproxy.resources` | Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `haproxy.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
| `nginx.resources` | Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `nginx.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `nano` |
### Configuration parameters
### Application-specific parameters
| Name | Description | Type | Value |
| ----------- | ----------------------------------------------- | ---------- | ----- |
| `endpoints` | Endpoints configuration, as a list of <ip:port> | `[]string` | `[]` |
### HAProxy parameters
| Name | Description | Type | Value |
| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------ |
| `haproxy` | HAProxy configuration | `object` | `{}` |
| `haproxy.replicas` | Number of HAProxy replicas | `int` | `2` |
| `haproxy.resources` | Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
| `haproxy.resources.cpu` | CPU available to each replica | `*quantity` | `null` |
| `haproxy.resources.memory` | Memory (RAM) available to each replica | `*quantity` | `null` |
| `haproxy.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`. | `string` | `nano` |
### Nginx parameters
| Name | Description | Type | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------ |
| `nginx` | Nginx configuration | `object` | `{}` |
| `nginx.replicas` | Number of Nginx replicas | `int` | `2` |
| `nginx.resources` | Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied. | `*object` | `null` |
| `nginx.resources.cpu` | CPU available to each replica | `*quantity` | `null` |
| `nginx.resources.memory` | Memory (RAM) available to each replica | `*quantity` | `null` |
| `nginx.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`. | `string` | `nano` |
| Name | Description | Value |
| ----------- | ----------------------- | ----- |
| `endpoints` | Endpoints configuration | `[]` |
## Parameter examples and reference
@@ -120,7 +100,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.7.0@sha256:e0a07082bb6fc6aeaae2315f335386f1705a646c72f9e0af512aebbca5cb2b15
ghcr.io/cozystack/cozystack/nginx-cache:0.6.0@sha256:b7633717cd7449c0042ae92d8ca9b36e4d69566561f5c7d44e21058e7d05c6d5

View File

@@ -10,9 +10,7 @@ spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
{{- if (include "cozy-lib.network.disableLoadBalancerNodePorts" $ | fromYaml) }}
allocateLoadBalancerNodePorts: false
{{- end }}
{{- end }}
selector:
app: {{ .Release.Name }}-haproxy
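
Whether node ports were actually allocated for the LoadBalancer Service is visible on the live object; with `allocateLoadBalancerNodePorts: false` the field below stays empty (release and Service names assumed):

kubectl get svc mydb-haproxy -o jsonpath='{.spec.allocateLoadBalancerNodePorts}{"\n"}'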

View File

@@ -1,164 +1,87 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"endpoints": {
"description": "Endpoints configuration, as a list of \u003cip:port\u003e",
"type": "array",
"default": [],
"items": {
"type": "string"
}
},
"external": {
"description": "Enable external access from outside the cluster",
"type": "boolean",
"default": false
},
"haproxy": {
"description": "HAProxy configuration",
"type": "object",
"default": {},
"required": [
"replicas",
"resources",
"resourcesPreset"
],
"properties": {
"replicas": {
"description": "Number of HAProxy replicas",
"type": "integer",
"default": 2
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"description": "Enable external access from outside the cluster",
"default": false
},
"resources": {
"description": "Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object",
"default": {},
"properties": {
"cpu": {
"description": "CPU available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
"size": {
"type": "string",
"description": "Persistent Volume size",
"default": "10Gi"
},
"storageClass": {
"type": "string",
"description": "StorageClass used to store the data",
"default": ""
},
"haproxy": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of HAProxy replicas",
"default": 2
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"memory": {
"description": "Memory (RAM) available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
"resources": {
"type": "object",
"description": "Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
{
"type": "string"
"resourcesPreset": {
"type": "string",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "nano",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
],
"x-kubernetes-int-or-string": true
}
}
},
"resourcesPreset": {
"description": "Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.",
"type": "string",
"default": "nano",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
"nginx": {
"description": "Nginx configuration",
"type": "object",
"default": {},
"required": [
"replicas",
"resourcesPreset"
],
"properties": {
"replicas": {
"description": "Number of Nginx replicas",
"type": "integer",
"default": 2
},
"resources": {
"description": "Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object",
"default": {},
"properties": {
"cpu": {
"description": "CPU available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
"nginx": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of Nginx replicas",
"default": 2
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"memory": {
"description": "Memory (RAM) available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
"resources": {
"type": "object",
"description": "Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
{
"type": "string"
"resourcesPreset": {
"type": "string",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "nano",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
],
"x-kubernetes-int-or-string": true
}
}
},
"resourcesPreset": {
"description": "Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.",
"type": "string",
"default": "nano",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
"endpoints": {
"type": "array",
"description": "Endpoints configuration",
"default": [],
"items": {}
}
}
},
"size": {
"description": "Persistent Volume Claim size, available for application data",
"default": "10Gi",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"storageClass": {
"description": "StorageClass used to store the data",
"type": "string"
}
}
}
}

View File

@@ -1,14 +1,39 @@
## @section Common parameters
##
## @param size {quantity} Persistent Volume Claim size, available for application data
size: 10Gi
## @param storageClass {string} StorageClass used to store the data
storageClass: ""
## @param external {bool} Enable external access from outside the cluster
external: false
## @section Application-specific parameters
## @param endpoints {[]string} Endpoints configuration, as a list of <ip:port>
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param storageClass StorageClass used to store the data
## @param haproxy.replicas Number of HAProxy replicas
## @param nginx.replicas Number of Nginx replicas
##
external: false
size: 10Gi
storageClass: ""
haproxy:
replicas: 2
## @param haproxy.resources Explicit CPU and memory configuration for each HAProxy replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param haproxy.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"
nginx:
replicas: 2
## @param nginx.resources Explicit CPU and memory configuration for each nginx replica. When left empty, the preset defined in `resourcesPreset` is applied.
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @param nginx.resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.
resourcesPreset: "nano"
## @section Configuration parameters
## @param endpoints Endpoints configuration
## Example:
## endpoints:
## - 10.100.3.1:80
@@ -19,35 +44,3 @@ external: false
## - 10.100.3.13:80
##
endpoints: []
## @section HAProxy parameters
##
## @param haproxy {haproxy} HAProxy configuration
haproxy:
## @field haproxy.replicas {int} Number of HAProxy replicas
replicas: 2
## @field haproxy.resources {resources} Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied.
## @field resources.cpu {*quantity} CPU available to each replica
## @field resources.memory {*quantity} Memory (RAM) available to each replica
resources: {}
# resources:
# cpu: 4000m
# memory: 4Gi
## @field haproxy.resourcesPreset {string enum:"nano,micro,small,medium,large,xlarge,2xlarge"} Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
resourcesPreset: "nano"
## @section Nginx parameters
##
## @param nginx {nginx} Nginx configuration
nginx:
## @field nginx.replicas {int} Number of Nginx replicas
replicas: 2
## @field nginx.resources {*resources} Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied.
# resources:
# cpu: 4000m
# memory: 4Gi
resources: {}
## @field nginx.resourcesPreset {string enum:"nano,micro,small,medium,large,xlarge,2xlarge"} Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
resourcesPreset: "nano"
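
A filled-in override for an externally reachable cache with two upstream endpoints, following the commented example above (addresses are placeholders):

cat > my-values.yaml <<'EOF'
external: true
endpoints:
  - 10.100.3.1:80
  - 10.100.3.2:80
EOF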

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.8.1
version: 0.8.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -1,5 +1,6 @@
include ../../../scripts/package.mk
PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -i -o json --indent 4 '.properties.kafka.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json
yq -i -o json --indent 4 '.properties.zookeeper.properties.resourcesPreset.enum = ["none", "nano", "micro", "small", "medium", "large", "xlarge", "2xlarge"]' values.schema.json

View File

@@ -4,49 +4,25 @@
### Common parameters
| Name | Description | Type | Value |
| ---------- | ----------------------------------------------- | ------ | ------- |
| `external` | Enable external access from outside the cluster | `bool` | `false` |
| Name | Description | Value |
| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
| `kafka.resources` | Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `kafka.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
| `zookeeper.resources` | Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
| `zookeeper.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `small` |
### Configuration parameters
### Application-specific parameters
| Name | Description | Type | Value |
| ---------------------- | -------------------- | ---------- | ----- |
| `topics` | Topics configuration | `[]object` | `[]` |
| `topics[i].name` | Topic name | `string` | `""` |
| `topics[i].partitions` | Number of partitions | `int` | `0` |
| `topics[i].replicas` | Number of replicas | `int` | `0` |
| `topics[i].config` | Topic configuration | `object` | `{}` |
### Kafka configuration
| Name | Description | Type | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------- |
| `kafka` | Kafka configuration | `object` | `{}` |
| `kafka.replicas` | Number of Kafka replicas | `int` | `3` |
| `kafka.resources` | Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied. | `*object` | `null` |
| `kafka.resources.cpu` | CPU available to each replica | `*quantity` | `null` |
| `kafka.resources.memory` | Memory (RAM) available to each replica | `*quantity` | `null` |
| `kafka.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`. | `string` | `small` |
| `kafka.size` | Persistent Volume size for Kafka | `quantity` | `10Gi` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `string` | `""` |
### Zookeeper configuration
| Name | Description | Type | Value |
| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ------- |
| `zookeeper` | Zookeeper configuration | `object` | `{}` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `int` | `3` |
| `zookeeper.resources` | Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied. | `*object` | `null` |
| `zookeeper.resources.cpu` | CPU available to each replica | `*quantity` | `null` |
| `zookeeper.resources.memory` | Memory (RAM) available to each replica | `*quantity` | `null` |
| `zookeeper.resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`. | `string` | `small` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `quantity` | `5Gi` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `string` | `""` |
| Name | Description | Value |
| -------- | -------------------- | ----- |
| `topics` | Topics configuration | `[]` |
## Parameter examples and reference
@@ -70,7 +46,7 @@ This setting is ignored if the corresponding `resources` value is set.
| `micro` | `500m` | `256Mi` |
| `small` | `1` | `512Mi` |
| `medium` | `1` | `1Gi` |
| `large` | `2` | `2Gi` |
| `large` | `3` | `2Gi` |
| `xlarge` | `4` | `4Gi` |
| `2xlarge` | `8` | `8Gi` |
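
As an illustration of the `topics` list documented above, a hedged override that sizes the cluster and creates one compacted topic (all names and numbers are examples only):

cat > my-values.yaml <<'EOF'
kafka:
  replicas: 3
  size: 10Gi
topics:
  - name: results
    partitions: 6
    replicas: 3
    config:
      cleanup.policy: compact
EOF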

View File

@@ -1,210 +1,97 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"description": "Enable external access from outside the cluster",
"type": "boolean",
"default": false
},
"kafka": {
"description": "Kafka configuration",
"type": "object",
"default": {},
"required": [
"replicas",
"resourcesPreset",
"size",
"storageClass"
],
"properties": {
"replicas": {
"description": "Number of Kafka replicas",
"type": "integer",
"default": 3
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"description": "Enable external access from outside the cluster",
"default": false
},
"resources": {
"description": "Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object",
"default": {},
"properties": {
"cpu": {
"description": "CPU available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"memory": {
"description": "Memory (RAM) available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
}
}
},
"resourcesPreset": {
"description": "Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.",
"type": "string",
"default": "small",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"size": {
"description": "Persistent Volume size for Kafka",
"default": "10Gi",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"storageClass": {
"description": "StorageClass used to store the Kafka data",
"type": "string"
}
}
},
"topics": {
"description": "Topics configuration",
"type": "array",
"default": [],
"items": {
"type": "object",
"required": [
"config",
"name",
"partitions",
"replicas"
],
"properties": {
"config": {
"description": "Topic configuration",
"kafka": {
"type": "object",
"x-kubernetes-preserve-unknown-fields": true
},
"name": {
"description": "Topic name",
"type": "string"
},
"partitions": {
"description": "Number of partitions",
"type": "integer"
},
"replicas": {
"description": "Number of replicas",
"type": "integer"
}
}
}
},
"zookeeper": {
"description": "Zookeeper configuration",
"type": "object",
"default": {},
"required": [
"replicas",
"resourcesPreset",
"size",
"storageClass"
],
"properties": {
"replicas": {
"description": "Number of ZooKeeper replicas",
"type": "integer",
"default": 3
},
"resources": {
"description": "Explicit CPU and memory configuration for each replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"type": "object",
"default": {},
"properties": {
"cpu": {
"description": "CPU available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
"properties": {
"size": {
"type": "string",
"description": "Persistent Volume size for Kafka",
"default": "10Gi"
},
{
"type": "string"
}
],
"x-kubernetes-int-or-string": true
},
"memory": {
"description": "Memory (RAM) available to each replica",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
"replicas": {
"type": "number",
"description": "Number of Kafka replicas",
"default": 3
},
{
"type": "string"
"storageClass": {
"type": "string",
"description": "StorageClass used to store the Kafka data",
"default": ""
},
"resources": {
"type": "object",
"description": "Explicit CPU and memory configuration for each Kafka replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "small",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
],
"x-kubernetes-int-or-string": true
}
}
},
"resourcesPreset": {
"description": "Default sizing preset used when `resources` is omitted. Allowed values: `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.",
"type": "string",
"default": "small",
"enum": [
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"size": {
"description": "Persistent Volume size for ZooKeeper",
"default": "5Gi",
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
"anyOf": [
{
"type": "integer"
},
{
"type": "string"
"zookeeper": {
"type": "object",
"properties": {
"size": {
"type": "string",
"description": "Persistent Volume size for ZooKeeper",
"default": "5Gi"
},
"replicas": {
"type": "number",
"description": "Number of ZooKeeper replicas",
"default": 3
},
"storageClass": {
"type": "string",
"description": "StorageClass used to store the ZooKeeper data",
"default": ""
},
"resources": {
"type": "object",
"description": "Explicit CPU and memory configuration for each Zookeeper replica. When left empty, the preset defined in `resourcesPreset` is applied.",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge.",
"default": "small",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
],
"x-kubernetes-int-or-string": true
},
"storageClass": {
"description": "StorageClass used to store the ZooKeeper data",
"type": "string"
"topics": {
"type": "array",
"description": "Topics configuration",
"default": [],
"items": {}
}
}
}
}
}
}

Some files were not shown because too many files have changed in this diff.