Compare commits


1 Commit

Author: Andrei Kvapil
SHA1: 156c1e8524
Message: PoC: Move workloadmonitors to controller
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
Date: 2025-11-04 19:31:31 +01:00
33 changed files with 749 additions and 278 deletions

View File

@@ -59,6 +59,10 @@ type CozystackResourceDefinitionSpec struct {
// Dashboard configuration for this resource
Dashboard *CozystackResourceDefinitionDashboard `json:"dashboard,omitempty"`
// WorkloadMonitors configuration for this resource
// List of WorkloadMonitor templates to be created for each application instance
WorkloadMonitors []WorkloadMonitorTemplate `json:"workloadMonitors,omitempty"`
}
type CozystackResourceDefinitionChart struct {
@@ -110,17 +114,18 @@ type CozystackResourceDefinitionRelease struct {
// - {{ .namespace }}: The namespace of the resource being processed
//
// Example YAML:
// secrets:
//   include:
//     - matchExpressions:
//         - key: badlabel
//           operator: DoesNotExist
//       matchLabels:
//         goodlabel: goodvalue
//       resourceNames:
//         - "{{ .name }}-secret"
//         - "{{ .kind }}-{{ .name }}-tls"
//         - "specificname"
//
// secrets:
//   include:
//     - matchExpressions:
//         - key: badlabel
//           operator: DoesNotExist
//       matchLabels:
//         goodlabel: goodvalue
//       resourceNames:
//         - "{{ .name }}-secret"
//         - "{{ .kind }}-{{ .name }}-tls"
//         - "specificname"
type CozystackResourceDefinitionResourceSelector struct {
metav1.LabelSelector `json:",inline"`
// ResourceNames is a list of resource names to match
@@ -191,3 +196,47 @@ type CozystackResourceDefinitionDashboard struct {
// +optional
Module bool `json:"module,omitempty"`
}
// ---- WorkloadMonitor types ----
// WorkloadMonitorTemplate defines a template for creating WorkloadMonitor resources
// for application instances. Fields support Go template syntax with the following variables:
// - {{ .Release.Name }}: The name of the Helm release
// - {{ .Release.Namespace }}: The namespace of the Helm release
// - {{ .Chart.Version }}: The version of the Helm chart
// - {{ .Values.<path> }}: Any value from the Helm values
type WorkloadMonitorTemplate struct {
// Name is the name of the WorkloadMonitor.
// Supports Go template syntax (e.g., "{{ .Release.Name }}-keeper")
// +required
Name string `json:"name"`
// Kind specifies the kind of the workload (e.g., "postgres", "kafka")
// +required
Kind string `json:"kind"`
// Type specifies the type of the workload (e.g., "postgres", "zookeeper")
// +required
Type string `json:"type"`
// Selector is a map of label key-value pairs for matching workloads.
// Supports Go template syntax in values (e.g., "app.kubernetes.io/instance: {{ .Release.Name }}")
// +required
Selector map[string]string `json:"selector"`
// Replicas is a Go template expression that evaluates to the desired number of replicas.
// Example: "{{ .Values.replicas }}" or "{{ .Values.clickhouseKeeper.replicas }}"
// +optional
Replicas string `json:"replicas,omitempty"`
// MinReplicas is a Go template expression that evaluates to the minimum number of replicas.
// Example: "1" or "{{ div .Values.replicas 2 | add1 }}"
// +optional
MinReplicas string `json:"minReplicas,omitempty"`
// Condition is a Go template expression that must evaluate to "true" for the monitor to be created.
// Example: "{{ .Values.clickhouseKeeper.enabled }}"
// If empty, the monitor is always created.
// +optional
Condition string `json:"condition,omitempty"`
}
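For orientation, here is a minimal sketch (not part of this diff) of how such a field renders: each template string is fed to Go's text/template with the variables documented above. The release name, namespace, and values below are hypothetical.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Template data mirroring the documented variables.
	data := map[string]interface{}{
		"Release": map[string]interface{}{"Name": "clickhouse-foo", "Namespace": "tenant-bar"},
		"Chart":   map[string]interface{}{"Version": "1.2.3"},
		"Values":  map[string]interface{}{"replicas": 3},
	}
	// Render a Name field such as "{{ .Release.Name }}-keeper".
	tmpl := template.Must(template.New("name").Parse("{{ .Release.Name }}-keeper"))
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // clickhouse-foo-keeper
}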

View File

@@ -244,6 +244,13 @@ func (in *CozystackResourceDefinitionSpec) DeepCopyInto(out *CozystackResourceDe
*out = new(CozystackResourceDefinitionDashboard)
(*in).DeepCopyInto(*out)
}
if in.WorkloadMonitors != nil {
in, out := &in.WorkloadMonitors, &out.WorkloadMonitors
*out = make([]WorkloadMonitorTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionSpec.
@@ -461,6 +468,28 @@ func (in *WorkloadMonitorStatus) DeepCopy() *WorkloadMonitorStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorTemplate) DeepCopyInto(out *WorkloadMonitorTemplate) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorTemplate.
func (in *WorkloadMonitorTemplate) DeepCopy() *WorkloadMonitorTemplate {
if in == nil {
return nil
}
out := new(WorkloadMonitorTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadStatus) DeepCopyInto(out *WorkloadStatus) {
*out = *in

View File

@@ -192,6 +192,14 @@ func main() {
os.Exit(1)
}
if err = (&controller.WorkloadMonitorFromCRDReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitorFromCRD")
os.Exit(1)
}
if err = (&controller.WorkloadReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),

View File

@@ -0,0 +1,439 @@
package controller
import (
"bytes"
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"text/template"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// WorkloadMonitorFromCRDReconciler reconciles HelmReleases and creates WorkloadMonitors
// based on CozystackResourceDefinition templates
type WorkloadMonitorFromCRDReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=helm.toolkit.fluxcd.io,resources=helmreleases,verbs=get;list;watch
// +kubebuilder:rbac:groups=cozystack.io,resources=cozystackresourcedefinitions,verbs=get;list;watch
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch
const (
WorkloadMonitorOwnerLabel = "workloadmonitor.cozystack.io/owned-by-crd"
WorkloadMonitorSourceLabel = "workloadmonitor.cozystack.io/helm-release"
)
// Reconcile processes HelmRelease resources and creates corresponding WorkloadMonitors
func (r *WorkloadMonitorFromCRDReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
// Get the HelmRelease
hr := &helmv2.HelmRelease{}
if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
if errors.IsNotFound(err) {
// HelmRelease deleted - cleanup will be handled by owner references
return ctrl.Result{}, nil
}
logger.Error(err, "unable to fetch HelmRelease")
return ctrl.Result{}, err
}
// Skip tenant (system-managed) HelmReleases
if strings.HasPrefix(hr.Name, "tenant-") {
return ctrl.Result{}, nil
}
// Find the matching CozystackResourceDefinition
crd, err := r.findCRDForHelmRelease(ctx, hr)
if err != nil {
if errors.IsNotFound(err) {
// No CRD found for this HelmRelease - skip
logger.V(1).Info("No CozystackResourceDefinition found for HelmRelease", "name", hr.Name)
return ctrl.Result{}, nil
}
logger.Error(err, "unable to find CozystackResourceDefinition")
return ctrl.Result{}, err
}
// If the definition has no WorkloadMonitors, clean up any existing ones we created
if len(crd.Spec.WorkloadMonitors) == 0 {
if err := r.cleanupWorkloadMonitors(ctx, hr); err != nil {
logger.Error(err, "failed to cleanup WorkloadMonitors")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// Get the HelmRelease values for template rendering
values, err := r.getHelmReleaseValues(ctx, hr)
if err != nil {
logger.Error(err, "unable to get HelmRelease values")
return ctrl.Result{}, err
}
// Create/update WorkloadMonitors based on templates
if err := r.reconcileWorkloadMonitors(ctx, hr, crd, values); err != nil {
logger.Error(err, "failed to reconcile WorkloadMonitors")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// findCRDForHelmRelease finds the CozystackResourceDefinition for a given HelmRelease
func (r *WorkloadMonitorFromCRDReconciler) findCRDForHelmRelease(ctx context.Context, hr *helmv2.HelmRelease) (*cozyv1alpha1.CozystackResourceDefinition, error) {
// List all CozystackResourceDefinitions
var crdList cozyv1alpha1.CozystackResourceDefinitionList
if err := r.List(ctx, &crdList); err != nil {
return nil, err
}
// Match by chart name and prefix
for i := range crdList.Items {
crd := &crdList.Items[i]
if crd.Spec.Release.Chart.Name == hr.Spec.Chart.Spec.Chart {
// Check if HelmRelease name matches the prefix
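// Example (illustrative; the prefix value is hypothetical): a definition
// for chart "clickhouse" with release prefix "ch-" matches a HelmRelease
// named "ch-mydb" that installs the "clickhouse" chart.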
if strings.HasPrefix(hr.Name, crd.Spec.Release.Prefix) {
return crd, nil
}
}
}
return nil, errors.NewNotFound(schema.GroupResource{Group: "cozystack.io", Resource: "cozystackresourcedefinitions"}, "")
}
// getHelmReleaseValues extracts the values from HelmRelease spec
func (r *WorkloadMonitorFromCRDReconciler) getHelmReleaseValues(ctx context.Context, hr *helmv2.HelmRelease) (map[string]interface{}, error) {
if hr.Spec.Values == nil {
return make(map[string]interface{}), nil
}
// Convert apiextensionsv1.JSON to map
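// Note: encoding/json decodes all JSON numbers as float64, so numeric
// Helm values (e.g., replicas) reach the template data as float64.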
values := make(map[string]interface{})
if err := json.Unmarshal(hr.Spec.Values.Raw, &values); err != nil {
return nil, fmt.Errorf("failed to unmarshal values: %w", err)
}
return values, nil
}
// reconcileWorkloadMonitors creates or updates WorkloadMonitors based on CRD templates
func (r *WorkloadMonitorFromCRDReconciler) reconcileWorkloadMonitors(
ctx context.Context,
hr *helmv2.HelmRelease,
crd *cozyv1alpha1.CozystackResourceDefinition,
values map[string]interface{},
) error {
logger := log.FromContext(ctx)
// Get chart version from HelmRelease
chartVersion := ""
if len(hr.Status.History) > 0 {
chartVersion = hr.Status.History[0].ChartVersion
}
// Template context
templateData := map[string]interface{}{
"Release": map[string]interface{}{
"Name": hr.Name,
"Namespace": hr.Namespace,
},
"Chart": map[string]interface{}{
"Version": chartVersion,
},
"Values": values,
}
// Track which monitors we should have
expectedMonitors := make(map[string]bool)
// Process each WorkloadMonitor template
for _, tmpl := range crd.Spec.WorkloadMonitors {
// Check condition
if tmpl.Condition != "" {
shouldCreate, err := evaluateCondition(tmpl.Condition, templateData)
if err != nil {
logger.Error(err, "failed to evaluate condition", "template", tmpl.Name, "condition", tmpl.Condition)
continue
}
if !shouldCreate {
logger.V(1).Info("Skipping WorkloadMonitor due to condition", "template", tmpl.Name)
continue
}
}
// Render monitor name
monitorName, err := renderTemplate(tmpl.Name, templateData)
if err != nil {
logger.Error(err, "failed to render monitor name", "template", tmpl.Name)
continue
}
expectedMonitors[monitorName] = true
// Render selector values
selector := make(map[string]string)
for key, valueTmpl := range tmpl.Selector {
renderedValue, err := renderTemplate(valueTmpl, templateData)
if err != nil {
logger.Error(err, "failed to render selector value", "key", key, "template", valueTmpl)
continue
}
selector[key] = renderedValue
}
// Render replicas
var replicas *int32
if tmpl.Replicas != "" {
replicasStr, err := renderTemplate(tmpl.Replicas, templateData)
if err != nil {
logger.Error(err, "failed to render replicas", "template", tmpl.Replicas)
} else {
if replicasInt, err := strconv.ParseInt(replicasStr, 10, 32); err == nil {
replicas = pointer.Int32(int32(replicasInt))
}
}
}
// Render minReplicas
var minReplicas *int32
if tmpl.MinReplicas != "" {
minReplicasStr, err := renderTemplate(tmpl.MinReplicas, templateData)
if err != nil {
logger.Error(err, "failed to render minReplicas", "template", tmpl.MinReplicas)
} else {
if minReplicasInt, err := strconv.ParseInt(minReplicasStr, 10, 32); err == nil {
minReplicas = pointer.Int32(int32(minReplicasInt))
}
}
}
// Create or update WorkloadMonitor
monitor := &cozyv1alpha1.WorkloadMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: monitorName,
Namespace: hr.Namespace,
},
}
_, err = controllerutil.CreateOrUpdate(ctx, r.Client, monitor, func() error {
// Set labels
if monitor.Labels == nil {
monitor.Labels = make(map[string]string)
}
monitor.Labels[WorkloadMonitorOwnerLabel] = "true"
monitor.Labels[WorkloadMonitorSourceLabel] = hr.Name
// Set owner reference to HelmRelease for automatic cleanup
if err := controllerutil.SetControllerReference(hr, monitor, r.Scheme); err != nil {
return err
}
// Update spec
monitor.Spec.Selector = selector
monitor.Spec.Kind = tmpl.Kind
monitor.Spec.Type = tmpl.Type
monitor.Spec.Version = chartVersion
monitor.Spec.Replicas = replicas
monitor.Spec.MinReplicas = minReplicas
return nil
})
if err != nil {
logger.Error(err, "failed to create/update WorkloadMonitor", "name", monitorName)
continue
}
logger.V(1).Info("WorkloadMonitor reconciled", "name", monitorName)
}
// Clean up WorkloadMonitors that are no longer in templates
if err := r.cleanupUnexpectedMonitors(ctx, hr, expectedMonitors); err != nil {
logger.Error(err, "failed to cleanup unexpected WorkloadMonitors")
return err
}
return nil
}
// cleanupWorkloadMonitors removes all WorkloadMonitors created for a HelmRelease
func (r *WorkloadMonitorFromCRDReconciler) cleanupWorkloadMonitors(ctx context.Context, hr *helmv2.HelmRelease) error {
return r.cleanupUnexpectedMonitors(ctx, hr, make(map[string]bool))
}
// cleanupUnexpectedMonitors removes WorkloadMonitors that are no longer expected
func (r *WorkloadMonitorFromCRDReconciler) cleanupUnexpectedMonitors(
ctx context.Context,
hr *helmv2.HelmRelease,
expectedMonitors map[string]bool,
) error {
logger := log.FromContext(ctx)
// List all WorkloadMonitors in the namespace that we created
var monitorList cozyv1alpha1.WorkloadMonitorList
labelSelector := labels.SelectorFromSet(labels.Set{
WorkloadMonitorOwnerLabel: "true",
WorkloadMonitorSourceLabel: hr.Name,
})
if err := r.List(ctx, &monitorList,
client.InNamespace(hr.Namespace),
client.MatchingLabelsSelector{Selector: labelSelector},
); err != nil {
return err
}
// Delete monitors that are not expected
for i := range monitorList.Items {
monitor := &monitorList.Items[i]
if !expectedMonitors[monitor.Name] {
logger.Info("Deleting unexpected WorkloadMonitor", "name", monitor.Name)
if err := r.Delete(ctx, monitor); err != nil && !errors.IsNotFound(err) {
logger.Error(err, "failed to delete WorkloadMonitor", "name", monitor.Name)
}
}
}
return nil
}
// renderTemplate renders a Go template string with the given data
func renderTemplate(tmplStr string, data interface{}) (string, error) {
// Check if it's already a simple value (no template markers)
if !strings.Contains(tmplStr, "{{") {
return tmplStr, nil
}
// Use the Helm-style helper functions from getTemplateFuncs
tmpl, err := template.New("").Funcs(getTemplateFuncs()).Parse(tmplStr)
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
return "", fmt.Errorf("failed to execute template: %w", err)
}
return strings.TrimSpace(buf.String()), nil
}
// evaluateCondition renders a template condition; the result counts as true
// only if it renders to "true", "1", or "yes" (case-insensitive).
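// Illustrative: with .Values.clickhouseKeeper.enabled set to true, the
// condition "{{ .Values.clickhouseKeeper.enabled }}" renders to "true" and
// passes; renders such as "false", "0", or "" do not.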
func evaluateCondition(condition string, data interface{}) (bool, error) {
result, err := renderTemplate(condition, data)
if err != nil {
return false, err
}
// Check for truthy values
result = strings.TrimSpace(strings.ToLower(result))
return result == "true" || result == "1" || result == "yes", nil
}
// getTemplateFuncs returns a small set of template functions approximating Helm/Sprig helpers
func getTemplateFuncs() template.FuncMap {
return template.FuncMap{
// Math functions
"add": func(a, b int) int { return a + b },
"sub": func(a, b int) int { return a - b },
"mul": func(a, b int) int { return a * b },
"div": func(a, b int) int {
if b == 0 {
return 0
}
return a / b
},
"add1": func(a int) int { return a + 1 },
"sub1": func(a int) int { return a - 1 },
// String functions
"upper": strings.ToUpper,
"lower": strings.ToLower,
"trim": strings.TrimSpace,
"trimAll": func(cutset, s string) string { return strings.Trim(s, cutset) },
"replace": func(old, new string, n int, s string) string { return strings.Replace(s, old, new, n) },
// Logic functions
"default": func(defaultVal, val interface{}) interface{} {
if val == nil || val == "" {
return defaultVal
}
return val
},
"empty": func(val interface{}) bool {
return val == nil || val == ""
},
"not": func(val bool) bool {
return !val
},
}
}
// SetupWithManager sets up the controller with the Manager
func (r *WorkloadMonitorFromCRDReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Named("workloadmonitor-from-crd-controller").
For(&helmv2.HelmRelease{}).
Owns(&cozyv1alpha1.WorkloadMonitor{}).
Watches(
&cozyv1alpha1.CozystackResourceDefinition{},
handler.EnqueueRequestsFromMapFunc(r.mapCRDToHelmReleases),
).
Complete(r)
}
// mapCRDToHelmReleases maps CRD changes to HelmRelease reconcile requests
func (r *WorkloadMonitorFromCRDReconciler) mapCRDToHelmReleases(ctx context.Context, obj client.Object) []reconcile.Request {
crd, ok := obj.(*cozyv1alpha1.CozystackResourceDefinition)
if !ok {
return nil
}
// List all HelmReleases
var hrList helmv2.HelmReleaseList
if err := r.List(ctx, &hrList); err != nil {
return nil
}
var requests []reconcile.Request
for i := range hrList.Items {
hr := &hrList.Items[i]
// Skip tenant HelmReleases
if strings.HasPrefix(hr.Name, "tenant-") {
continue
}
// Match by chart name and prefix
if crd.Spec.Release.Chart.Name == hr.Spec.Chart.Spec.Chart {
if strings.HasPrefix(hr.Name, crd.Spec.Release.Prefix) {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: hr.Name,
Namespace: hr.Namespace,
},
})
}
}
}
return requests
}
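To make the helper semantics concrete, here is a self-contained sketch (assuming the same int-typed div/add1 helpers as getTemplateFuncs above) that evaluates the minReplicas expression used by the ingress definition later in this diff:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Same shapes as the div/add1 helpers in getTemplateFuncs.
	funcs := template.FuncMap{
		"div": func(a, b int) int {
			if b == 0 {
				return 0
			}
			return a / b
		},
		"add1": func(a int) int { return a + 1 },
	}
	// An int is used here because these helpers take int arguments;
	// values decoded from HelmRelease JSON would arrive as float64.
	data := map[string]interface{}{
		"Values": map[string]interface{}{"replicas": 5},
	}
	tmpl := template.Must(template.New("minReplicas").Funcs(funcs).Parse("{{ div .Values.replicas 2 | add1 }}"))
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // 3
}

For 5 replicas this yields 3, a majority quorum. Note that text/template will not coerce float64 arguments into these int-typed helpers, so numeric values decoded from HelmRelease JSON would be rejected by expressions like div.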

View File

@@ -1,28 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: clickhouse
  type: clickhouse
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
{{- if .Values.clickhouseKeeper.enabled }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-keeper
spec:
  replicas: {{ .Values.clickhouseKeeper.replicas }}
  minReplicas: 1
  kind: clickhouse
  type: clickhouse
  selector:
    app: {{ $.Release.Name }}-keeper
  version: {{ $.Chart.Version }}
{{- end }}

View File

@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: ferretdb
  type: ferretdb
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,20 +0,0 @@
{{- if .Values.monitoring.enabled }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ .Release.Name }}
  labels:
    app.kubernetes.io/name: foundationdb
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: {{ .Values.cluster.processCounts.storage }}
  minReplicas: {{ include "foundationdb.minReplicas" . }}
  kind: foundationdb
  type: foundationdb
  selector:
    foundationdb.org/fdb-cluster-name: {{ .Release.Name }}
    foundationdb.org/fdb-process-class: storage
  version: {{ .Chart.Version }}
{{- end }}

View File

@@ -1,39 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-haproxy
spec:
  replicas: {{ .Values.haproxy.replicas }}
  minReplicas: 1
  kind: http-cache
  type: http-cache
  selector:
    app: {{ $.Release.Name }}-haproxy
  version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-nginx
spec:
  replicas: {{ .Values.nginx.replicas }}
  minReplicas: 1
  kind: http-cache
  type: http-cache
  selector:
    app: {{ $.Release.Name }}-nginx-cache
  version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: http-cache
  type: http-cache
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,30 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: kafka
  type: kafka
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
    app.kubernetes.io/name: kafka
  version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-zookeeper
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: kafka
  type: zookeeper
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
    app.kubernetes.io/name: zookeeper
  version: {{ $.Chart.Version }}

View File

@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: mysql
  type: mysql
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: nats
  type: nats
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}-system
  version: {{ $.Chart.Version }}

View File

@@ -79,17 +79,3 @@ spec:
policy.cozystack.io/allow-to-apiserver: "true"
app.kubernetes.io/name: postgres.apps.cozystack.io
app.kubernetes.io/instance: {{ $.Release.Name }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: postgres
  type: postgres
  selector:
    app.kubernetes.io/name: postgres.apps.cozystack.io
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: rabbitmq
  type: rabbitmq
  selector:
    app.kubernetes.io/name: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -68,34 +68,3 @@ spec:
auth:
secretPath: {{ .Release.Name }}-auth
{{- end }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-redis
  namespace: {{ $.Release.Namespace }}
spec:
  minReplicas: 1
  replicas: {{ .Values.replicas }}
  kind: redis
  type: redis
  selector:
    app.kubernetes.io/component: redis
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-sentinel
  namespace: {{ $.Release.Namespace }}
spec:
  minReplicas: 2
  replicas: 3
  kind: redis
  type: sentinel
  selector:
    app.kubernetes.io/component: sentinel
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: tcp-balancer
  type: haproxy
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,12 +0,0 @@
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: 0
  minReplicas: 0
  kind: vm-disk
  type: vm-disk
  selector:
    app.kubernetes.io/instance: {{ .Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,12 +0,0 @@
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: vpn
  type: vpn
  selector:
    app.kubernetes.io/instance: {{ .Release.Name }}
  version: {{ $.Chart.Version }}

View File

@@ -1,16 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
  namespace: {{ $.Release.Namespace }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: {{ div .Values.replicas 2 | add1 }}
  kind: ingress
  type: controller
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx-system
    app.kubernetes.io/name: ingress-nginx
  version: {{ $.Chart.Version }}

View File

@@ -671,6 +671,62 @@ spec:
x-kubernetes-map-type: atomic
type: array
type: object
workloadMonitors:
  description: |-
    WorkloadMonitors configuration for this resource
    List of WorkloadMonitor templates to be created for each application instance
  items:
    description: |-
      WorkloadMonitorTemplate defines a template for creating WorkloadMonitor resources
      for application instances. Fields support Go template syntax with the following variables:
      - {{ .Release.Name }}: The name of the Helm release
      - {{ .Release.Namespace }}: The namespace of the Helm release
      - {{ .Chart.Version }}: The version of the Helm chart
      - {{ .Values.<path> }}: Any value from the Helm values
    properties:
      condition:
        description: |-
          Condition is a Go template expression that must evaluate to "true" for the monitor to be created.
          Example: "{{ .Values.clickhouseKeeper.enabled }}"
          If empty, the monitor is always created.
        type: string
      kind:
        description: Kind specifies the kind of the workload (e.g.,
          "postgres", "kafka")
        type: string
      minReplicas:
        description: |-
          MinReplicas is a Go template expression that evaluates to the minimum number of replicas.
          Example: "1" or "{{ div .Values.replicas 2 | add1 }}"
        type: string
      name:
        description: |-
          Name is the name of the WorkloadMonitor.
          Supports Go template syntax (e.g., "{{ .Release.Name }}-keeper")
        type: string
      replicas:
        description: |-
          Replicas is a Go template expression that evaluates to the desired number of replicas.
          Example: "{{ .Values.replicas }}" or "{{ .Values.clickhouseKeeper.replicas }}"
        type: string
      selector:
        additionalProperties:
          type: string
        description: |-
          Selector is a map of label key-value pairs for matching workloads.
          Supports Go template syntax in values (e.g., "app.kubernetes.io/instance: {{ .Release.Name }}")
        type: object
      type:
        description: Type specifies the type of the workload (e.g.,
          "postgres", "zookeeper")
        type: string
    required:
    - kind
    - name
    - selector
    - type
    type: object
  type: array
required:
- application
- release

View File

@@ -37,3 +37,19 @@ spec:
include:
- resourceNames:
- chendpoint-clickhouse-{{ .name }}
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: clickhouse
    type: clickhouse
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"
  - name: "{{ .Release.Name }}-keeper"
    kind: clickhouse
    type: clickhouse
    selector:
      app: "{{ .Release.Name }}-keeper"
    replicas: "{{ .Values.clickhouseKeeper.replicas }}"
    minReplicas: "1"
    condition: "{{ .Values.clickhouseKeeper.enabled }}"

View File

@@ -38,3 +38,11 @@ spec:
include:
- resourceNames:
- ferretdb-{{ .name }}
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: ferretdb
    type: ferretdb
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"

View File

@@ -28,3 +28,13 @@ spec:
- database
icon: PHN2ZyB3aWR0aD0iMTQ0IiBoZWlnaHQ9IjE0NCIgdmlld0JveD0iMCAwIDE0NCAxNDQiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxyZWN0IHdpZHRoPSIxNDQiIGhlaWdodD0iMTQ0IiByeD0iMjQiIGZpbGw9InVybCgjcGFpbnQwX3JhZGlhbF84NThfMzA3MikiLz4KPHBhdGggZD0iTTEzNS43ODQgNzUuNjQ0NkwxMzUuOTM5IDg3Ljc2MzhMODkuNjg0NiA4MS41MzYyTDYyLjA4NjggODQuNTA3OUwzNS4zNDE3IDgxLjQzMjlMOC43NTE2NyA4NC41ODU0TDguNzI1ODMgODEuNTEwNEwzNS4zNjc2IDc3LjU4MjZWNjQuMTcxM0w2Mi4yOTM1IDcwLjczNDhMNjIuMzQ1MiA4MS4yNzc4TDg5LjQ3NzkgNzcuNjg2TDg5LjQwMDQgNjQuMTk3MkwxMzUuNzg0IDc1LjY0NDZaIiBmaWxsPSJ3aGl0ZSIvPgo8cGF0aCBkPSJNODkuNDc3OCA4Ni4wMzI1TDEzNS44ODggOTAuODM4OFYxMDIuNzI2SDguNjQ4MjVMOC41MTkwNCA5OS41NzNIMzUuMjY0MUMzNS4yNjQxIDk5LjU3MyAzNS4yNjQxIDkwLjczNTUgMzUuMjY0MSA4Ni4wNTgzQzQ0LjI1NjcgODYuOTM2OSA2Mi4wODY3IDg4LjY5NDEgNjIuMDg2NyA4OC42OTQxVjk5LjI2MjlIODkuNDc3OFY4Ni4wMzI1WiIgZmlsbD0id2hpdGUiLz4KPHBhdGggZD0iTTYyLjI5MzQgNjYuODg0Nkw2Mi4yMTU4IDYzLjYyODZDNjIuMjE1OCA2My42Mjg2IDc5LjgxMzMgNTguMzU3MSA4OC45MDkyIDU1LjY2OTdDODguOTA5MiA1MS4zMDI2IDg4LjkwOTIgNDcuMDkwNiA4OC45MDkyIDQyQzEwNC44NzkgNDguNDA4NSAxMjAuMjI4IDU0LjYxMDIgMTM1LjczMyA2MC44Mzc4QzEzNS43MzMgNjQuNzEzOSAxMzUuNzMzIDY4LjQzNSAxMzUuNzMzIDcyLjU2OTVDMTE5Ljg0MSA2OC4yMDI0IDEwNC4yODQgNjMuOTEyOSA4OS4xNjc2IDU5Ljc1MjVDNzkuOTY4NCA2Mi4yMDc0IDYyLjI5MzQgNjYuODg0NiA2Mi4yOTM0IDY2Ljg4NDZaIiBmaWxsPSJ3aGl0ZSIvPgo8cGF0aCBkPSJNMzUuMzk2MiA4MS43MDczTDguODA2MTIgODQuODU5OEw4Ljc4MDI3IDgxLjc4NDhMMzUuNDIyIDc3Ljg1N1Y2NC40NDU3TDYyLjM0OCA3MS4wMDkzTDYyLjM5OTYgODEuNTUyMkw4OS41MzIzIDc3Ljk2MDRMODkuNDU0OCA2NC40NzE2TDEzNS44MzkgNzUuOTE5TDEzNS45OTQgODguMDM4Mkw4OS43MzkxIDgxLjgxMDZMNjIuMTQxMiA4NC43ODIzTDM1LjM5NjIgODEuNzA3M1oiIGZpbGw9IndoaXRlIi8+CjxwYXRoIGQ9Ik04OS41MzIzIDg2LjMwNjlMMTM1Ljk0MiA5MS4xMTMzVjEwM0g4LjcwMjdMOC41NzM0OSA5OS44NDc0SDM1LjMxODZDMzUuMzE4NiA5OS44NDc0IDM1LjMxODYgOTEuMDA5OSAzNS4zMTg2IDg2LjMzMjhDNDQuMzExMSA4Ny4yMTE0IDYyLjE0MTIgODguOTY4NSA2Mi4xNDEyIDg4Ljk2ODVWOTkuNTM3M0g4OS41MzIzVjg2LjMwNjlaIiBmaWxsPSJ3aGl0ZSIvPgo8cGF0aCBkPSJNNjIuMzQ4MyA2Ny4xNTlMNjIuMjcwOCA2My45MDMxQzYyLjI3MDggNjMuOTAzMSA3OS44NjgyIDU4LjYzMTYgODguOTY0MiA1NS45NDQyQzg4Ljk2NDIgNTEuNTc3MSA4OC45NjQyIDQ3LjM2NTEgODguOTY0MiA0Mi4yNzQ0QzEwNC45MzQgNDguNjgyOSAxMjAuMjgzIDU0Ljg4NDcgMTM1Ljc4NyA2MS4xMTIzQzEzNS43ODcgNjQuOTg4NCAxMzUuNzg3IDY4LjcwOTQgMTM1Ljc4NyA3Mi44NDM5QzExOS44OTUgNjguNDc2OSAxMDQuMzM5IDY0LjE4NzMgODkuMjIyNiA2MC4wMjdDODAuMDIzMyA2Mi40ODE4IDYyLjM0ODMgNjcuMTU5IDYyLjM0ODMgNjcuMTU5WiIgZmlsbD0id2hpdGUiLz4KPGRlZnM+CjxyYWRpYWxHcmFkaWVudCBpZD0icGFpbnQwX3JhZGlhbF84NThfMzA3MiIgY3g9IjAiIGN5PSIwIiByPSIxIiBncmFkaWVudFVuaXRzPSJ1c2VyU3BhY2VPblVzZSIgZ3JhZGllbnRUcmFuc2Zvcm09InRyYW5zbGF0ZSgtMjkuNSAtMTgpIHJvdGF0ZSgzOS42OTYzKSBzY2FsZSgzMDIuMTY4IDI3NS4yNzEpIj4KPHN0b3Agc3RvcC1jb2xvcj0iI0JFRERGRiIvPgo8c3RvcCBvZmZzZXQ9IjAuMjU5NjE1IiBzdG9wLWNvbG9yPSIjOUVDQ0ZEIi8+CjxzdG9wIG9mZnNldD0iMC41OTEzNDYiIHN0b3AtY29sb3I9IiMzRjlBRkIiLz4KPHN0b3Agb2Zmc2V0PSIxIiBzdG9wLWNvbG9yPSIjMEI3MEUwIi8+CjwvcmFkaWFsR3JhZGllbnQ+CjwvZGVmcz4KPC9zdmc+Cg==
# keysOrder: []
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: foundationdb
    type: foundationdb
    selector:
      foundationdb.org/fdb-cluster-name: "{{ .Release.Name }}"
      foundationdb.org/fdb-process-class: storage
    replicas: "{{ .Values.cluster.processCounts.storage }}"
    minReplicas: "{{ include \"foundationdb.minReplicas\" . }}"
    condition: "{{ .Values.monitoring.enabled }}"

View File

@@ -32,3 +32,25 @@ spec:
secrets:
exclude: []
include: []
workloadMonitors:
  - name: "{{ .Release.Name }}-haproxy"
    kind: http-cache
    type: http-cache
    selector:
      app: "{{ .Release.Name }}-haproxy"
    replicas: "{{ .Values.haproxy.replicas }}"
    minReplicas: "1"
  - name: "{{ .Release.Name }}-nginx"
    kind: http-cache
    type: http-cache
    selector:
      app: "{{ .Release.Name }}-nginx-cache"
    replicas: "{{ .Values.nginx.replicas }}"
    minReplicas: "1"
  - name: "{{ .Release.Name }}"
    kind: http-cache
    type: http-cache
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"

View File

@@ -37,3 +37,13 @@ spec:
include:
- resourceNames:
- "{{ slice .namespace 7 }}-ingress-controller"
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: ingress
    type: controller
    selector:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx-system
      app.kubernetes.io/name: ingress-nginx
    replicas: "{{ .Values.replicas }}"
    minReplicas: "{{ div .Values.replicas 2 | add1 }}"

View File

@@ -38,3 +38,20 @@ spec:
include:
- resourceNames:
- kafka-{{ .name }}-kafka-bootstrap
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: kafka
    type: kafka
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
      app.kubernetes.io/name: kafka
    replicas: "{{ .Values.kafka.replicas }}"
    minReplicas: "1"
  - name: "{{ .Release.Name }}-zookeeper"
    kind: kafka
    type: zookeeper
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
      app.kubernetes.io/name: zookeeper
    replicas: "{{ .Values.zookeeper.replicas }}"
    minReplicas: "1"

View File

@@ -39,3 +39,11 @@ spec:
- resourceNames:
- mysql-{{ .name }}-primary
- mysql-{{ .name }}-secondary
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: mysql
    type: mysql
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"

View File

@@ -38,3 +38,11 @@ spec:
include:
- resourceNames:
- nats-{{ .name }}
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: nats
    type: nats
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}-system"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"

View File

@@ -49,3 +49,12 @@ spec:
- postgres-{{ .name }}-ro
- postgres-{{ .name }}-rw
- postgres-{{ .name }}-external-write
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: postgres
    type: postgres
    selector:
      app.kubernetes.io/name: postgres.apps.cozystack.io
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"

View File

@@ -40,3 +40,11 @@ spec:
include:
- resourceNames:
- rabbitmq-{{ .name }}
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: rabbitmq
    type: rabbitmq
    selector:
      app.kubernetes.io/name: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"

View File

@@ -41,3 +41,20 @@ spec:
- rfrm-redis-{{ .name }}
- rfrs-redis-{{ .name }}
- redis-{{ .name }}-external-lb
workloadMonitors:
  - name: "{{ .Release.Name }}-redis"
    kind: redis
    type: redis
    selector:
      app.kubernetes.io/component: redis
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"
  - name: "{{ .Release.Name }}-sentinel"
    kind: redis
    type: sentinel
    selector:
      app.kubernetes.io/component: sentinel
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "3"
    minReplicas: "2"

View File

@@ -31,3 +31,11 @@ spec:
secrets:
exclude: []
include: []
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: tcp-balancer
    type: haproxy
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"

View File

@@ -32,3 +32,11 @@ spec:
secrets:
exclude: []
include: []
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: vm-disk
    type: vm-disk
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "0"
    minReplicas: "0"

View File

@@ -38,3 +38,11 @@ spec:
include:
- resourceNames:
- vpn-{{ .name }}-vpn
workloadMonitors:
  - name: "{{ .Release.Name }}"
    kind: vpn
    type: vpn
    selector:
      app.kubernetes.io/instance: "{{ .Release.Name }}"
    replicas: "{{ .Values.replicas }}"
    minReplicas: "1"