Compare commits

..

1 Commits

Author SHA1 Message Date
Timofei Larkin
7c8823a835 [platform] Cozy values secret replicator
Signed-off-by: Timofei Larkin <lllamnyp@gmail.com>
2026-01-06 18:10:47 +03:00
102 changed files with 1007 additions and 2178 deletions

View File

@@ -1,4 +1,4 @@
.PHONY: manifests assets unit-tests helm-unit-tests
.PHONY: manifests repos assets unit-tests helm-unit-tests
build-deps:
@command -V find docker skopeo jq gh helm > /dev/null
@@ -18,7 +18,6 @@ build: build-deps
make -C packages/system/backup-controller image
make -C packages/system/lineage-controller-webhook image
make -C packages/system/cilium image
make -C packages/system/linstor image
make -C packages/system/kubeovn-webhook image
make -C packages/system/kubeovn-plunger image
make -C packages/system/dashboard image
@@ -26,15 +25,21 @@ build: build-deps
make -C packages/system/kamaji image
make -C packages/system/bucket image
make -C packages/system/objectstorage-controller image
make -C packages/system/grafana-operator image
make -C packages/core/testing image
make -C packages/core/talos image
make -C packages/core/platform image
make -C packages/core/installer image
make manifests
repos:
rm -rf _out
make -C packages/system repo
make -C packages/apps repo
make -C packages/extra repo
manifests:
mkdir -p _out/assets
(cd packages/core/installer/; helm template --namespace cozy-installer installer .) > _out/assets/cozystack-installer.yaml
(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
assets:
make -C packages/core/talos assets

View File

@@ -17,7 +17,6 @@ limitations under the License.
package v1alpha1
import (
helmv2 "github.com/fluxcd/helm-controller/api/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -62,6 +61,24 @@ type CozystackResourceDefinitionSpec struct {
Dashboard *CozystackResourceDefinitionDashboard `json:"dashboard,omitempty"`
}
type CozystackResourceDefinitionChart struct {
// Name of the Helm chart
Name string `json:"name"`
// Source reference for the Helm chart
SourceRef SourceRef `json:"sourceRef"`
}
type SourceRef struct {
// Kind of the source reference
// +kubebuilder:default:="HelmRepository"
Kind string `json:"kind"`
// Name of the source reference
Name string `json:"name"`
// Namespace of the source reference
// +kubebuilder:default:="cozy-public"
Namespace string `json:"namespace"`
}
type CozystackResourceDefinitionApplication struct {
// Kind of the application, used for UI and API
Kind string `json:"kind"`
@@ -74,8 +91,9 @@ type CozystackResourceDefinitionApplication struct {
}
type CozystackResourceDefinitionRelease struct {
// Reference to the chart source
ChartRef *helmv2.CrossNamespaceSourceReference `json:"chartRef"`
// Helm chart configuration
// +optional
Chart CozystackResourceDefinitionChart `json:"chart,omitempty"`
// Labels for the release
Labels map[string]string `json:"labels,omitempty"`
// Prefix for the release name

View File

@@ -21,7 +21,6 @@ limitations under the License.
package v1alpha1
import (
"github.com/fluxcd/helm-controller/api/v2"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -119,6 +118,22 @@ func (in *CozystackResourceDefinitionApplication) DeepCopy() *CozystackResourceD
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionChart) DeepCopyInto(out *CozystackResourceDefinitionChart) {
// Shallow struct copy; Chart holds only value fields (Name string, SourceRef struct).
*out = *in
// SourceRef contains only string fields (Kind, Name, Namespace), so the value
// assignment suffices as a deep copy; the generator emits it explicitly.
out.SourceRef = in.SourceRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionChart.
func (in *CozystackResourceDefinitionChart) DeepCopy() *CozystackResourceDefinitionChart {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionChart)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionDashboard) DeepCopyInto(out *CozystackResourceDefinitionDashboard) {
*out = *in
@@ -190,11 +205,7 @@ func (in *CozystackResourceDefinitionList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionRelease) DeepCopyInto(out *CozystackResourceDefinitionRelease) {
*out = *in
if in.ChartRef != nil {
in, out := &in.ChartRef, &out.ChartRef
*out = new(v2.CrossNamespaceSourceReference)
**out = **in
}
out.Chart = in.Chart
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
@@ -611,6 +622,21 @@ func (in Selector) DeepCopy() Selector {
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourceRef) DeepCopyInto(out *SourceRef) {
// SourceRef has only string fields (Kind, Name, Namespace), so a plain
// value copy is a complete deep copy.
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRef.
func (in *SourceRef) DeepCopy() *SourceRef {
if in == nil {
return nil
}
out := new(SourceRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Variant) DeepCopyInto(out *Variant) {
*out = *in

View File

@@ -0,0 +1,29 @@
// Command asset-server serves static files from a directory over HTTP.
package main

import (
	"flag"
	"log"
	"net/http"
	"path/filepath"
	"time"
)

// main parses the listen address and directory flags, then serves the
// directory's contents at the server root until the process exits.
func main() {
	addr := flag.String("address", ":8123", "Address to listen on")
	dir := flag.String("dir", "/cozystack/assets", "Directory to serve files from")
	flag.Parse()

	// Resolve to an absolute path so logs and file serving do not depend
	// on the process working directory.
	absDir, err := filepath.Abs(*dir)
	if err != nil {
		log.Fatalf("Error getting absolute path for %s: %v", *dir, err)
	}

	mux := http.NewServeMux()
	mux.Handle("/", http.FileServer(http.Dir(absDir)))

	// Use an explicit http.Server with timeouts so slow or stalled clients
	// cannot hold connections open indefinitely (http.ListenAndServe sets none).
	srv := &http.Server{
		Addr:              *addr,
		Handler:           mux,
		ReadHeaderTimeout: 10 * time.Second,
		WriteTimeout:      60 * time.Second,
	}

	log.Printf("Server starting on %s, serving directory %s", *addr, absDir)
	if err := srv.ListenAndServe(); err != nil {
		log.Fatalf("Server failed to start: %v", err)
	}
}

View File

@@ -199,6 +199,16 @@ func main() {
}
}
if err := (&cozyvaluesreplicator.SecretReplicatorReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
SourceNamespace: cozyValuesSecretNamespace,
SecretName: cozyValuesSecretName,
TargetNamespaceSelector: targetNSSelector,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozyValuesReplicator")
os.Exit(1)
}
// Setup PackageSource reconciler
if err := (&operator.PackageSourceReconciler{
Client: mgr.GetClient(),
@@ -217,18 +227,6 @@ func main() {
os.Exit(1)
}
// Setup CozyValuesReplicator reconciler
if err := (&cozyvaluesreplicator.SecretReplicatorReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
SourceNamespace: cozyValuesSecretNamespace,
SecretName: cozyValuesSecretName,
TargetNamespaceSelector: targetNSSelector,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozyValuesReplicator")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {

View File

@@ -73,7 +73,7 @@ func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleasesForCRD(ctx
labelSelector := client.MatchingLabels{
"apps.cozystack.io/application.kind": applicationKind,
"apps.cozystack.io/application.group": applicationGroup,
"cozystack.io/ui": "true",
"cozystack.io/ui": "true",
}
// List all HelmReleases with matching labels
@@ -130,30 +130,55 @@ func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleaseChart(ctx c
hrCopy := hr.DeepCopy()
updated := false
// Validate ChartRef configuration exists
if crd.Spec.Release.ChartRef == nil ||
crd.Spec.Release.ChartRef.Kind == "" ||
crd.Spec.Release.ChartRef.Name == "" ||
crd.Spec.Release.ChartRef.Namespace == "" {
logger.Error(fmt.Errorf("invalid ChartRef in CRD"), "Skipping HelmRelease chartRef update: ChartRef is nil or incomplete",
"crd", crd.Name)
// Validate Chart configuration exists
if crd.Spec.Release.Chart.Name == "" {
logger.V(4).Info("Skipping HelmRelease chart update: Chart.Name is empty", "crd", crd.Name)
return nil
}
// Use ChartRef directly from CRD
expectedChartRef := crd.Spec.Release.ChartRef
// Validate SourceRef fields
if crd.Spec.Release.Chart.SourceRef.Kind == "" ||
crd.Spec.Release.Chart.SourceRef.Name == "" ||
crd.Spec.Release.Chart.SourceRef.Namespace == "" {
logger.Error(fmt.Errorf("invalid SourceRef in CRD"), "Skipping HelmRelease chart update: SourceRef fields are incomplete",
"crd", crd.Name,
"kind", crd.Spec.Release.Chart.SourceRef.Kind,
"name", crd.Spec.Release.Chart.SourceRef.Name,
"namespace", crd.Spec.Release.Chart.SourceRef.Namespace)
return nil
}
// Check if chartRef needs to be updated
if hrCopy.Spec.ChartRef == nil {
hrCopy.Spec.ChartRef = expectedChartRef
// Clear the old chart field when switching to chartRef
hrCopy.Spec.Chart = nil
updated = true
} else if hrCopy.Spec.ChartRef.Kind != expectedChartRef.Kind ||
hrCopy.Spec.ChartRef.Name != expectedChartRef.Name ||
hrCopy.Spec.ChartRef.Namespace != expectedChartRef.Namespace {
hrCopy.Spec.ChartRef = expectedChartRef
// Get version and reconcileStrategy from CRD or use defaults
version := ">= 0.0.0-0"
reconcileStrategy := "Revision"
// TODO: Add Version and ReconcileStrategy fields to CozystackResourceDefinitionChart if needed
// Build expected SourceRef
expectedSourceRef := helmv2.CrossNamespaceObjectReference{
Kind: crd.Spec.Release.Chart.SourceRef.Kind,
Name: crd.Spec.Release.Chart.SourceRef.Name,
Namespace: crd.Spec.Release.Chart.SourceRef.Namespace,
}
if hrCopy.Spec.Chart == nil {
// Need to create Chart spec
hrCopy.Spec.Chart = &helmv2.HelmChartTemplate{
Spec: helmv2.HelmChartTemplateSpec{
Chart: crd.Spec.Release.Chart.Name,
Version: version,
ReconcileStrategy: reconcileStrategy,
SourceRef: expectedSourceRef,
},
}
updated = true
} else {
// Update existing Chart spec
if hrCopy.Spec.Chart.Spec.Chart != crd.Spec.Release.Chart.Name ||
hrCopy.Spec.Chart.Spec.SourceRef != expectedSourceRef {
hrCopy.Spec.Chart.Spec.Chart = crd.Spec.Release.Chart.Name
hrCopy.Spec.Chart.Spec.SourceRef = expectedSourceRef
updated = true
}
}
// Check and update valuesFrom configuration
@@ -165,7 +190,7 @@ func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleaseChart(ctx c
}
if updated {
logger.V(4).Info("Updating HelmRelease chartRef", "name", hr.Name, "namespace", hr.Namespace)
logger.V(4).Info("Updating HelmRelease chart", "name", hr.Name, "namespace", hr.Namespace)
if err := r.Update(ctx, hrCopy); err != nil {
return fmt.Errorf("failed to update HelmRelease: %w", err)
}
@@ -173,3 +198,4 @@ func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleaseChart(ctx c
return nil
}

View File

@@ -1,42 +1,22 @@
/*
Copyright 2025 The Cozystack Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cozyvaluesreplicator
import (
"context"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// SecretReplicatorReconciler replicates a source secret to namespaces matching a label selector.
// Reconciler fields this setup relies on.
type SecretReplicatorReconciler struct {
client.Client
Scheme *runtime.Scheme
@@ -53,7 +33,7 @@ type SecretReplicatorReconciler struct {
func (r *SecretReplicatorReconciler) SetupWithManager(mgr ctrl.Manager) error {
// 1) Primary watch for requirement (b):
// Reconcile any Secret named r.SecretName in any namespace (includes source too).
// This keeps Secrets in cache and causes "copy changed -> reconcile it" to happen.
// This keeps Secrets in cache and causes copy changed -> reconcile it to happen.
secretNameOnly := predicate.NewPredicateFuncs(func(obj client.Object) bool {
return obj.GetName() == r.SecretName
})
@@ -119,7 +99,7 @@ func (r *SecretReplicatorReconciler) SetupWithManager(mgr ctrl.Manager) error {
})
// Only trigger from namespace events where the label match may be (or become) true.
// (You can keep this simple; it's fine if it fires on any update—your Reconcile should be idempotent.)
// (You can keep this simple; its fine if it fires on any update—your Reconcile should be idempotent.)
namespaceMayMatter := predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
ns, ok := e.Object.(*corev1.Namespace)
@@ -151,7 +131,7 @@ func (r *SecretReplicatorReconciler) SetupWithManager(mgr ctrl.Manager) error {
builder.WithPredicates(onlySourceSecret),
).
// (a) Watch Namespaces so they're cached and so "namespace appears / starts matching" enqueues reconcile.
// (a) Watch Namespaces so theyre cached and so namespace appears / starts matching enqueues reconcile.
Watches(
&corev1.Namespace{},
enqueueOnNamespaceMatch,
@@ -168,105 +148,13 @@ func isSourceSecret(obj client.Object, r *SecretReplicatorReconciler) bool {
}
// Reconcile replicates the source secret into the namespace named by req,
// and removes stale replicas when the namespace stops matching the selector
// or when the source secret has been deleted.
//
// Note: the short-form replacement (bare r.Get + DeepCopy + r.Update) is
// incorrect: it ignores both errors, Update cannot create a missing replica,
// and DeepCopying the source carries its UID/resourceVersion into the target
// namespace, which the API server rejects. CreateOrUpdate with explicit
// error handling is required.
func (r *SecretReplicatorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	// Ignore requests that don't match our secret name or are for the source namespace.
	if req.Name != r.SecretName || req.Namespace == r.SourceNamespace {
		return ctrl.Result{}, nil
	}

	// Verify the target namespace still exists.
	targetNamespace := &corev1.Namespace{}
	if err := r.Get(ctx, types.NamespacedName{Name: req.Namespace}, targetNamespace); err != nil {
		if apierrors.IsNotFound(err) {
			// Namespace is gone; its secrets go with it.
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Failed to get target namespace", "namespace", req.Namespace)
		return ctrl.Result{}, err
	}

	// If the namespace no longer matches the selector, clean up any replica.
	if r.TargetNamespaceSelector != nil && !r.TargetNamespaceSelector.Matches(labels.Set(targetNamespace.Labels)) {
		if err := r.deleteReplica(ctx, req); err != nil {
			logger.Error(err, "Failed to delete replicated secret from non-matching namespace",
				"namespace", req.Namespace, "secret", req.Name)
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil
	}

	// Fetch the source secret; if it was deleted, remove the replica too.
	originalSecret := &corev1.Secret{}
	if err := r.Get(ctx, types.NamespacedName{Namespace: r.SourceNamespace, Name: r.SecretName}, originalSecret); err != nil {
		if apierrors.IsNotFound(err) {
			if err := r.deleteReplica(ctx, req); err != nil {
				logger.Error(err, "Failed to delete replicated secret after source secret deletion",
					"namespace", req.Namespace, "secret", req.Name)
				return ctrl.Result{}, err
			}
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Failed to get source secret",
			"namespace", r.SourceNamespace, "secret", r.SecretName)
		return ctrl.Result{}, err
	}

	// Create or update the replica in place. CreateOrUpdate handles the
	// create-vs-update distinction and keeps the replica's own object
	// metadata (UID, resourceVersion) intact.
	replicatedSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: req.Namespace,
			Name:      req.Name,
		},
	}
	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, replicatedSecret, func() error {
		// Copy the secret data and type from the source.
		replicatedSecret.Data = make(map[string][]byte, len(originalSecret.Data))
		for k, v := range originalSecret.Data {
			replicatedSecret.Data[k] = v
		}
		replicatedSecret.Type = originalSecret.Type
		// Copy labels and annotations from the source (if any).
		for k, v := range originalSecret.Labels {
			if replicatedSecret.Labels == nil {
				replicatedSecret.Labels = make(map[string]string, len(originalSecret.Labels))
			}
			replicatedSecret.Labels[k] = v
		}
		for k, v := range originalSecret.Annotations {
			if replicatedSecret.Annotations == nil {
				replicatedSecret.Annotations = make(map[string]string, len(originalSecret.Annotations))
			}
			replicatedSecret.Annotations[k] = v
		}
		return nil
	})
	if err != nil {
		logger.Error(err, "Failed to create or update replicated secret",
			"namespace", req.Namespace, "secret", req.Name)
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}

// deleteReplica removes the replicated secret from the request's namespace,
// treating "already gone" as success.
func (r *SecretReplicatorReconciler) deleteReplica(ctx context.Context, req ctrl.Request) error {
	replica := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: req.Namespace,
			Name:      req.Name,
		},
	}
	if err := r.Delete(ctx, replica); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}

View File

@@ -37,14 +37,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
// AnnotationSkipCozystackValues disables injection of cozystack-values secret into HelmRelease
// This annotation should be placed on PackageSource
AnnotationSkipCozystackValues = "operator.cozystack.io/skip-cozystack-values"
// SecretCozystackValues is the name of the secret containing cluster and namespace configuration
SecretCozystackValues = "cozystack-values"
)
// PackageReconciler reconciles Package resources
type PackageReconciler struct {
client.Client
@@ -223,16 +215,6 @@ func (r *PackageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
},
}
// Add valuesFrom for cozystack-values secret unless disabled by annotation on PackageSource
if packageSource.GetAnnotations()[AnnotationSkipCozystackValues] != "true" {
hr.Spec.ValuesFrom = []helmv2.ValuesReference{
{
Kind: "Secret",
Name: SecretCozystackValues,
},
}
}
// Set ownerReference
gvk, err := apiutil.GVKForObject(pkg, r.Scheme)
if err != nil {
@@ -887,7 +869,6 @@ func (r *PackageReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Named("cozystack-package").
For(&cozyv1alpha1.Package{}).
Owns(&helmv2.HelmRelease{}).
Watches(
&cozyv1alpha1.PackageSource{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {

View File

@@ -31,7 +31,9 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// PackageSourceReconciler reconciles PackageSource resources
@@ -407,7 +409,26 @@ func (r *PackageSourceReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Named("cozystack-packagesource").
For(&cozyv1alpha1.PackageSource{}).
Owns(&sourcewatcherv1beta1.ArtifactGenerator{}).
Watches(
&sourcewatcherv1beta1.ArtifactGenerator{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
ag, ok := obj.(*sourcewatcherv1beta1.ArtifactGenerator)
if !ok {
return nil
}
// Find the PackageSource that owns this ArtifactGenerator by ownerReference
for _, ownerRef := range ag.OwnerReferences {
if ownerRef.Kind == "PackageSource" {
return []reconcile.Request{{
NamespacedName: types.NamespacedName{
Name: ownerRef.Name,
},
}}
}
}
return nil
}),
).
Complete(r)
}

View File

@@ -7,42 +7,51 @@ pre-checks:
../../../hack/pre-checks.sh
show:
cozyhr show --namespace $(NAMESPACE) $(NAME) --plain
cozyhr show -n $(NAMESPACE) $(NAME) --plain
apply:
cozyhr show --namespace $(NAMESPACE) $(NAME) --plain | kubectl apply --filename -
cozyhr show -n $(NAMESPACE) $(NAME) --plain | kubectl apply -f-
diff:
cozyhr show --namespace $(NAMESPACE) $(NAME) --plain | kubectl diff --filename -
cozyhr show -n $(NAMESPACE) $(NAME) --plain | kubectl diff -f -
image: pre-checks image-operator image-packages
image: pre-checks image-cozystack
image-cozystack:
docker buildx build -f images/cozystack/Dockerfile ../../.. \
--tag $(REGISTRY)/installer:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/installer:latest \
--cache-to type=inline \
--metadata-file images/installer.json \
$(BUILDX_ARGS)
IMAGE="$(REGISTRY)/installer:$(call settag,$(TAG))@$$(yq e '."containerimage.digest"' images/installer.json -o json -r)" \
yq -i '.cozystack.image = strenv(IMAGE)' values.yaml
rm -f images/installer.json
update-version:
TAG="$(call settag,$(TAG))" \
yq --inplace '.cozystackOperator.cozystackVersion = strenv(TAG)' values.yaml
yq -i '.cozystackOperator.cozystackVersion = strenv(TAG)' values.yaml
image-operator:
docker buildx build --file images/cozystack-operator/Dockerfile ../../.. \
--tag $(REGISTRY)/cozystack-operator:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/cozystack-operator:latest \
--cache-to type=inline \
--metadata-file images/cozystack-operator.json \
$(BUILDX_ARGS)
IMAGE="$(REGISTRY)/cozystack-operator:$(call settag,$(TAG))@$$(yq --exit-status '.["containerimage.digest"]' images/cozystack-operator.json --output-format json --raw-output)" \
yq --inplace '.cozystackOperator.image = strenv(IMAGE)' values.yaml
docker buildx build -f images/cozystack-operator/Dockerfile ../../.. \
--tag $(REGISTRY)/cozystack-operator:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/cozystack-operator:latest \
--cache-to type=inline \
--metadata-file images/cozystack-operator.json \
$(BUILDX_ARGS)
IMAGE="$(REGISTRY)/cozystack-operator:$(call settag,$(TAG))@$$(yq e '."containerimage.digest"' images/cozystack-operator.json -o json -r)" \
yq -i '.cozystackOperator.image = strenv(IMAGE)' values.yaml
rm -f images/cozystack-operator.json
image-packages: update-version
mkdir -p ../../../_out/assets images
flux push artifact \
oci://$(REGISTRY)/cozystack-packages:$(call settag,$(TAG)) \
--path=../../../packages \
--source=https://github.com/cozystack/cozystack \
--revision="$$(git describe --tags):$$(git rev-parse HEAD)" \
2>&1 | tee images/cozystack-packages.log
REPO="oci://$(REGISTRY)/cozystack-packages" \
DIGEST=$$(awk --field-separator @ '/artifact successfully pushed/ {print $$2}' images/cozystack-packages.log) && \
rm -f images/cozystack-packages.log && \
test -n "$$DIGEST" && \
yq --inplace '.cozystackOperator.platformSourceUrl = strenv(REPO)' values.yaml && \
yq --inplace '.cozystackOperator.platformSourceRef = "digest=" + strenv(DIGEST)' values.yaml
oci://$(REGISTRY)/platform-packages:$(call settag,$(TAG)) \
--path=../../../packages \
--source=https://github.com/cozystack/cozystack \
--revision="$$(git describe --tags):$$(git rev-parse HEAD)" \
2>&1 | tee images/cozystack-packages.log
export REPO="oci://$(REGISTRY)/platform-packages"; \
export DIGEST=$$(awk -F@ '/artifact successfully pushed/ {print $$2}' images/cozystack-packages.log; rm -f images/cozystack-packages.log); \
test -n "$$DIGEST" && yq -i '.cozystackOperator.platformSource = (strenv(REPO) + "@" + strenv(DIGEST))' values.yaml

View File

@@ -0,0 +1,41 @@
# Stage 1: build the k8s-await-election wrapper from the pinned upstream tag.
FROM golang:1.24-alpine AS k8s-await-election-builder
ARG K8S_AWAIT_ELECTION_GITREPO=https://github.com/LINBIT/k8s-await-election
ARG K8S_AWAIT_ELECTION_VERSION=0.4.1
# TARGETARCH is a docker special variable: https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
ARG TARGETARCH
RUN apk add --no-cache git make
# Clone, pin to the release tag, build, and keep only the per-arch binary.
RUN git clone ${K8S_AWAIT_ELECTION_GITREPO} /usr/local/go/k8s-await-election/ \
&& cd /usr/local/go/k8s-await-election \
&& git reset --hard v${K8S_AWAIT_ELECTION_VERSION} \
&& make \
&& mv ./out/k8s-await-election-${TARGETARCH} /k8s-await-election
# Stage 2: stage the cozystack source tree.
# NOTE(review): this stage only runs `go mod download` and copies sources;
# no `go build` output is produced or copied into the final image — confirm
# that a build step is not missing, or that only scripts/packages are needed.
FROM golang:1.25-alpine AS builder
ARG TARGETOS
ARG TARGETARCH
RUN apk add --no-cache make git
RUN apk add helm --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
COPY . /src/
WORKDIR /src
RUN go mod download
# Stage 3: runtime image with installer scripts and cluster tooling.
FROM alpine:3.22
# Install cozyhr at a pinned version via the upstream install script.
RUN wget -O- https://github.com/cozystack/cozyhr/raw/refs/heads/main/hack/install.sh | sh -s -- -v 1.5.0
RUN apk add --no-cache make kubectl helm coreutils git jq openssl
COPY --from=builder /src/scripts /cozystack/scripts
COPY --from=builder /src/packages/core /cozystack/packages/core
COPY --from=builder /src/packages/system /cozystack/packages/system
COPY --from=k8s-await-election-builder /k8s-await-election /usr/bin/k8s-await-election
WORKDIR /cozystack
# k8s-await-election wraps the installer; presumably it gates execution behind
# leader election — verify the K8S_AWAIT_ELECTION_* env vars are set at deploy time.
ENTRYPOINT ["/usr/bin/k8s-await-election", "/cozystack/scripts/installer.sh" ]

View File

@@ -0,0 +1 @@
_out

View File

@@ -1,3 +1,4 @@
{{- if .Values.cozystackOperator.enabled }}
---
apiVersion: v1
kind: Namespace
@@ -82,8 +83,6 @@ apiVersion: cozystack.io/v1alpha1
kind: PackageSource
metadata:
name: cozystack.cozystack-platform
annotations:
operator.cozystack.io/skip-cozystack-values: "true"
spec:
sourceRef:
kind: OCIRepository
@@ -120,3 +119,4 @@ spec:
valuesFiles:
- values.yaml
- values-isp-hosted.yaml
{{- end }}

View File

@@ -0,0 +1,81 @@
{{- if not .Values.cozystackOperator.enabled }}
---
apiVersion: v1
kind: Namespace
metadata:
name: cozy-system
labels:
cozystack.io/system: "true"
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cozystack
namespace: cozy-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cozystack
subjects:
- kind: ServiceAccount
name: cozystack
namespace: cozy-system
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cozystack
namespace: cozy-system
spec:
replicas: 1
selector:
matchLabels:
app: cozystack
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app: cozystack
spec:
hostNetwork: true
serviceAccountName: cozystack
containers:
- name: cozystack
image: "{{ .Values.cozystack.image }}"
env:
- name: KUBERNETES_SERVICE_HOST
value: localhost
- name: INSTALL_FLUX
value: "true"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
- name: K8S_AWAIT_ELECTION_ENABLED
value: "1"
- name: K8S_AWAIT_ELECTION_NAME
value: cozystack
- name: K8S_AWAIT_ELECTION_LOCK_NAME
value: cozystack
- name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE
value: cozy-system
- name: K8S_AWAIT_ELECTION_IDENTITY
valueFrom:
fieldRef:
fieldPath: metadata.name
tolerations:
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoSchedule"
- key: "node.cilium.io/agent-not-ready"
operator: "Exists"
effect: "NoSchedule"
{{- end }}

View File

@@ -1,5 +1,8 @@
cozystack:
image: ghcr.io/cozystack/cozystack/installer:v0.38.2@sha256:9ff92b655de6f9bea3cba4cd42dcffabd9aace6966dcfb1cc02dda2420ea4a15
cozystackOperator:
enabled: false
image: ghcr.io/cozystack/cozystack/cozystack-operator:latest@sha256:f7f6e0fd9e896b7bfa642d0bfa4378bc14e646bc5c2e86e2e09a82770ef33181
platformSourceUrl: 'oci://ghcr.io/cozystack/cozystack/cozystack-packages'
platformSourceUrl: 'oci://ghcr.io/cozystack/cozystack/platform-packages'
platformSourceRef: 'digest=sha256:0576491291b33936cdf770a5c5b5692add97339c1505fc67a92df9d69dfbfdf6'
cozystackVersion: latest

View File

@@ -4,26 +4,31 @@ NAMESPACE=cozy-system
include ../../../scripts/common-envs.mk
show:
cozyhr show --namespace $(NAMESPACE) $(NAME) --plain
cozyhr show -n $(NAMESPACE) $(NAME) --plain
apply:
cozyhr show --namespace $(NAMESPACE) $(NAME) --plain | kubectl apply --filename -
kubectl delete helmreleases.helm.toolkit.fluxcd.io --selector cozystack.io/marked-for-deletion=true --all-namespaces
cozyhr show -n $(NAMESPACE) $(NAME) --plain | kubectl apply -f-
kubectl delete helmreleases.helm.toolkit.fluxcd.io -l cozystack.io/marked-for-deletion=true -A
reconcile: apply
namespaces-show:
cozyhr show -n $(NAMESPACE) $(NAME) --plain -s templates/namespaces.yaml
namespaces-apply:
cozyhr show -n $(NAMESPACE) $(NAME) --plain -s templates/namespaces.yaml | kubectl apply -f-
diff:
cozyhr show --namespace $(NAMESPACE) $(NAME) --plain | kubectl diff --filename -
cozyhr show -n $(NAMESPACE) $(NAME) --plain | kubectl diff -f-
image: image-migrations
image-migrations:
docker buildx build --file images/migrations/Dockerfile . \
--tag $(REGISTRY)/platform-migrations:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/platform-migrations:latest \
image: image-assets
image-assets:
docker buildx build -f images/cozystack-assets/Dockerfile ../../.. \
--tag $(REGISTRY)/cozystack-assets:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/cozystack-assets:latest \
--cache-to type=inline \
--metadata-file images/migrations.json \
--metadata-file images/cozystack-assets.json \
$(BUILDX_ARGS)
IMAGE="$(REGISTRY)/platform-migrations:$(call settag,$(TAG))@$$(yq --exit-status '.["containerimage.digest"]' images/migrations.json --output-format json --raw-output)" \
yq --inplace '.migrations.image = strenv(IMAGE)' values.yaml
rm -f images/migrations.json
IMAGE="$(REGISTRY)/cozystack-assets:$(call settag,$(TAG))@$$(yq e '."containerimage.digest"' images/cozystack-assets.json -o json -r)" \
yq -i '.assets.image = strenv(IMAGE)' values.yaml
rm -f images/cozystack-assets.json

View File

@@ -1,638 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.0
name: cozystackresourcedefinitions.cozystack.io
spec:
group: cozystack.io
names:
kind: CozystackResourceDefinition
listKind: CozystackResourceDefinitionList
plural: cozystackresourcedefinitions
singular: cozystackresourcedefinition
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: CozystackResourceDefinition is the Schema for the cozystackresourcedefinitions
API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
properties:
application:
description: Application configuration
properties:
kind:
description: Kind of the application, used for UI and API
type: string
openAPISchema:
description: OpenAPI schema for the application, used for API
validation
type: string
plural:
description: Plural name of the application, used for UI and API
type: string
singular:
description: Singular name of the application, used for UI and
API
type: string
required:
- kind
- openAPISchema
- plural
- singular
type: object
dashboard:
description: Dashboard configuration for this resource
properties:
category:
description: Category used to group resources in the UI (e.g.,
"Storage", "Networking")
type: string
description:
description: Short description shown in catalogs or headers (e.g.,
"S3 compatible storage")
type: string
icon:
description: Icon encoded as a string (e.g., inline SVG, base64,
or data URI)
type: string
keysOrder:
description: Order of keys in the YAML view
items:
items:
type: string
type: array
type: array
module:
description: Whether this resource is a module (tenant module)
type: boolean
name:
description: Hard-coded name used in the UI (e.g., "bucket")
type: string
plural:
description: Plural human-readable name (e.g., "Buckets")
type: string
singular:
description: Human-readable name shown in the UI (e.g., "Bucket")
type: string
singularResource:
description: Whether this resource is singular (not a collection)
in the UI
type: boolean
tabs:
description: Which tabs to show for this resource
items:
description: DashboardTab enumerates allowed UI tabs.
enum:
- workloads
- ingresses
- services
- secrets
- yaml
type: string
type: array
tags:
description: Free-form tags for search and filtering
items:
type: string
type: array
weight:
description: Order weight for sorting resources in the UI (lower
first)
type: integer
required:
- category
- plural
- singular
type: object
ingresses:
description: Ingress selectors
properties:
exclude:
description: |-
Exclude contains an array of resource selectors that target resources.
If a resource matches the selector in any of the elements in the array, it is
hidden from the user, regardless of the matches in the include array.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
resourceNames:
description: |-
ResourceNames is a list of resource names to match
If specified, the resource must have one of these exact names to match the selector
items:
type: string
type: array
type: object
x-kubernetes-map-type: atomic
type: array
include:
description: |-
Include contains an array of resource selectors that target resources.
If a resource matches the selector in any of the elements in the array, and
matches none of the selectors in the exclude array that resource is marked
as a tenant resource and is visible to users.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
resourceNames:
description: |-
ResourceNames is a list of resource names to match
If specified, the resource must have one of these exact names to match the selector
items:
type: string
type: array
type: object
x-kubernetes-map-type: atomic
type: array
type: object
release:
description: Release configuration
properties:
chartRef:
description: Reference to the chart source
properties:
apiVersion:
description: APIVersion of the referent.
type: string
kind:
description: Kind of the referent.
enum:
- OCIRepository
- HelmChart
- ExternalArtifact
type: string
name:
description: Name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: |-
Namespace of the referent, defaults to the namespace of the Kubernetes
resource object that contains the reference.
maxLength: 63
minLength: 1
type: string
required:
- kind
- name
type: object
labels:
additionalProperties:
type: string
description: Labels for the release
type: object
prefix:
description: Prefix for the release name
type: string
required:
- chartRef
- prefix
type: object
secrets:
description: Secret selectors
properties:
exclude:
description: |-
Exclude contains an array of resource selectors that target resources.
If a resource matches the selector in any of the elements in the array, it is
hidden from the user, regardless of the matches in the include array.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
resourceNames:
description: |-
ResourceNames is a list of resource names to match
If specified, the resource must have one of these exact names to match the selector
items:
type: string
type: array
type: object
x-kubernetes-map-type: atomic
type: array
include:
description: |-
Include contains an array of resource selectors that target resources.
If a resource matches the selector in any of the elements in the array, and
matches none of the selectors in the exclude array that resource is marked
as a tenant resource and is visible to users.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
resourceNames:
description: |-
ResourceNames is a list of resource names to match
If specified, the resource must have one of these exact names to match the selector
items:
type: string
type: array
type: object
x-kubernetes-map-type: atomic
type: array
type: object
services:
description: Service selectors
properties:
exclude:
description: |-
Exclude contains an array of resource selectors that target resources.
If a resource matches the selector in any of the elements in the array, it is
hidden from the user, regardless of the matches in the include array.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
resourceNames:
description: |-
ResourceNames is a list of resource names to match
If specified, the resource must have one of these exact names to match the selector
items:
type: string
type: array
type: object
x-kubernetes-map-type: atomic
type: array
include:
description: |-
Include contains an array of resource selectors that target resources.
If a resource matches the selector in any of the elements in the array, and
matches none of the selectors in the exclude array that resource is marked
as a tenant resource and is visible to users.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
resourceNames:
description: |-
ResourceNames is a list of resource names to match
If specified, the resource must have one of these exact names to match the selector
items:
type: string
type: array
type: object
x-kubernetes-map-type: atomic
type: array
type: object
required:
- application
- release
type: object
type: object
served: true
storage: true

View File

@@ -0,0 +1,25 @@
# Build stage: compile the static assets server and pre-package the Helm repos.
FROM golang:1.25-alpine AS builder
# Declared for buildx cross-builds; not referenced by the visible build
# command below — presumably consumed elsewhere, TODO confirm.
ARG TARGETOS
ARG TARGETARCH
RUN apk add --no-cache make git
# helm is only packaged in Alpine edge/community for this base image.
RUN apk add helm --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
COPY . /src/
WORKDIR /src
RUN go mod download
# Fully static binary so it can run on the bare alpine runtime stage below.
RUN go build -o /cozystack-assets-server -ldflags '-extldflags "-static" -w -s' ./cmd/cozystack-assets-server
# Packages the system/apps/extra Helm repositories into _out/repos
# (see the `repos` target in the top-level Makefile).
RUN make repos
# Runtime stage: minimal image carrying the server plus the static assets.
FROM alpine:3.22
COPY --from=builder /src/_out/repos /cozystack/assets/repos
COPY --from=builder /cozystack-assets-server /usr/bin/cozystack-assets-server
COPY --from=builder /src/dashboards /cozystack/assets/dashboards
WORKDIR /cozystack
ENTRYPOINT ["/usr/bin/cozystack-assets-server"]

View File

@@ -1,12 +0,0 @@
# Runtime image for the platform pre-upgrade migration hook.
FROM alpine:3.22
# NOTE(review): piping a remote script straight into sh is a supply-chain
# risk — consider pinning a checksum for the cozyhr installer.
RUN wget -O- https://github.com/cozystack/cozyhr/raw/refs/heads/main/hack/install.sh | sh -s -- -v 1.5.0
RUN apk add --no-cache kubectl helm coreutils git jq ca-certificates bash curl
# Numbered migration scripts, executed sequentially by run-migrations.sh.
COPY migrations /migrations
COPY run-migrations.sh /usr/bin/run-migrations.sh
WORKDIR /migrations
ENTRYPOINT ["/usr/bin/run-migrations.sh"]

View File

@@ -1,41 +0,0 @@
#!/bin/sh
# run-migrations.sh — apply Cozystack platform migrations sequentially.
#
# Environment:
#   NAMESPACE        namespace of the cozystack-version ConfigMap (default: cozy-system)
#   CURRENT_VERSION  migration version currently applied (default: 0)
#   TARGET_VERSION   migration version to reach (default: 0)
#
# Exits non-zero as soon as any migration script fails.
set -eu
# 'pipefail' is not POSIX: 'set -euo pipefail' aborts immediately under
# shells like dash. Enable it only where the shell actually supports it.
if (set -o pipefail) 2>/dev/null; then
    set -o pipefail
fi

NAMESPACE="${NAMESPACE:-cozy-system}"
CURRENT_VERSION="${CURRENT_VERSION:-0}"
TARGET_VERSION="${TARGET_VERSION:-0}"

echo "Starting migrations from version $CURRENT_VERSION to $TARGET_VERSION"

# First install: no version ConfigMap yet. Record the target version and
# skip the migrations entirely — a fresh install has nothing to migrate.
if ! kubectl get configmap --namespace "$NAMESPACE" cozystack-version >/dev/null 2>&1; then
    echo "ConfigMap cozystack-version does not exist, creating it with version $TARGET_VERSION"
    kubectl create configmap --namespace "$NAMESPACE" cozystack-version \
        --from-literal=version="$TARGET_VERSION" \
        --dry-run=client --output yaml | kubectl apply --filename -
    echo "ConfigMap created with version $TARGET_VERSION"
    exit 0
fi

# Already up to date (or ahead): nothing to do.
if [ "$CURRENT_VERSION" -ge "$TARGET_VERSION" ]; then
    echo "Current version $CURRENT_VERSION is already at or above target version $TARGET_VERSION"
    exit 0
fi

# Run migrations one by one from CURRENT_VERSION+1 up to TARGET_VERSION.
# A missing numbered script is not an error (gaps are allowed).
for i in $(seq $((CURRENT_VERSION + 1)) "$TARGET_VERSION"); do
    if [ -f "/migrations/$i" ]; then
        echo "Running migration $i"
        chmod +x "/migrations/$i"
        "/migrations/$i" || {
            echo "Migration $i failed"
            exit 1
        }
        echo "Migration $i completed successfully"
    else
        echo "Migration $i not found, skipping"
    fi
done
echo "All migrations completed successfully"

View File

@@ -0,0 +1,73 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cozystack-assets
  namespace: cozy-system
  labels:
    app: cozystack-assets
spec:
  serviceName: cozystack-assets
  # Single replica: the Role below grants proxy access to the pod by its
  # stable StatefulSet name, cozystack-assets-0.
  replicas: 1
  selector:
    matchLabels:
      app: cozystack-assets
  template:
    metadata:
      labels:
        app: cozystack-assets
    spec:
      # hostNetwork + hostPort expose :8123 directly on the node —
      # presumably so the server is reachable via the apiserver pod proxy
      # before cluster networking is fully up; TODO confirm.
      hostNetwork: true
      containers:
      - name: assets-server
        image: "{{ .Values.assets.image }}"
        args:
        - "-dir=/cozystack/assets"
        - "-address=:8123"
        ports:
        - name: http
          containerPort: 8123
          hostPort: 8123
      # Schedule regardless of node taints.
      tolerations:
      - operator: Exists
---
# Grants read-only proxy access to exactly one pod: cozystack-assets-0.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cozystack-assets-reader
  namespace: cozy-system
rules:
- apiGroups: [""]
  resources:
  - pods/proxy
  resourceNames:
  - cozystack-assets-0
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cozystack-assets-reader
  namespace: cozy-system
subjects:
- kind: User
  name: cozystack-assets-reader
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: cozystack-assets-reader
  apiGroup: rbac.authorization.k8s.io
---
# In-cluster entry point for the assets server (e.g. Grafana dashboard URLs).
apiVersion: v1
kind: Service
metadata:
  name: cozystack-assets
  namespace: cozy-system
spec:
  ports:
  - name: http
    port: 80
    targetPort: 8123
  selector:
    app: cozystack-assets
  type: ClusterIP

View File

@@ -0,0 +1,40 @@
---
# Flux HelmRepository sources backed by the in-cluster assets server.
# All three URLs go through the kube-apiserver pod proxy to pod
# cozystack-assets-0, so TLS (certSecretRef) is verified against the
# API server endpoint.
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: cozystack-system
  namespace: cozy-system
  labels:
    cozystack.io/repository: system
spec:
  interval: 5m0s
  url: https://{{ include "cozystack.kubernetesAPIEndpoint" . }}/api/v1/namespaces/cozy-system/pods/cozystack-assets-0/proxy/repos/system
  certSecretRef:
    name: cozystack-assets-tls
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: cozystack-apps
  namespace: cozy-public
  labels:
    # Exposed in the Cozystack UI catalog.
    cozystack.io/ui: "true"
    cozystack.io/repository: apps
spec:
  interval: 5m0s
  url: https://{{ include "cozystack.kubernetesAPIEndpoint" . }}/api/v1/namespaces/cozy-system/pods/cozystack-assets-0/proxy/repos/apps
  certSecretRef:
    name: cozystack-assets-tls
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: cozystack-extra
  namespace: cozy-public
  labels:
    cozystack.io/repository: extra
spec:
  interval: 5m0s
  url: https://{{ include "cozystack.kubernetesAPIEndpoint" . }}/api/v1/namespaces/cozy-system/pods/cozystack-assets-0/proxy/repos/extra
  certSecretRef:
    name: cozystack-assets-tls

View File

@@ -1,69 +0,0 @@
{{- /*
Pre-install/pre-upgrade hook that runs platform migrations when the version
recorded in the cozystack-version ConfigMap is behind the chart's target
version (.Values.migrations.targetVersion). Renders nothing otherwise.
*/}}
{{- $shouldRunMigrationHook := false }}
{{- $currentVersion := 0 }}
{{- $targetVersion := .Values.migrations.targetVersion | int }}
{{- $configMap := lookup "v1" "ConfigMap" .Release.Namespace "cozystack-version" }}
{{- if $configMap }}
{{- /* ConfigMap present: migrate only if recorded version is behind target. */}}
{{- $currentVersion = dig "data" "version" "0" $configMap | int }}
{{- if lt $currentVersion $targetVersion }}
{{- $shouldRunMigrationHook = true }}
{{- end }}
{{- else }}
{{- /* No ConfigMap yet: let the hook run so it records the target version. */}}
{{- $shouldRunMigrationHook = true }}
{{- end }}
{{- if $shouldRunMigrationHook }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: cozystack-migration-hook
  annotations:
    helm.sh/hook: pre-upgrade,pre-install
    helm.sh/hook-weight: "1"
    helm.sh/hook-delete-policy: before-hook-creation
spec:
  backoffLimit: 3
  template:
    metadata:
      labels:
        # Network policy opt-in: the job must reach the API server.
        policy.cozystack.io/allow-to-apiserver: "true"
    spec:
      serviceAccountName: cozystack-migration-hook
      containers:
      - name: migration
        image: {{ .Values.migrations.image }}
        env:
        - name: NAMESPACE
          value: {{ .Release.Namespace | quote }}
        - name: CURRENT_VERSION
          value: {{ $currentVersion | quote }}
        - name: TARGET_VERSION
          value: {{ $targetVersion | quote }}
      restartPolicy: Never
---
# Migration scripts may touch arbitrary cluster resources, hence
# cluster-admin; the binding is deleted again once the hook succeeds.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    helm.sh/hook: pre-upgrade,pre-install
    helm.sh/hook-weight: "1"
    helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation
  name: cozystack-migration-hook
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: cozystack-migration-hook
  namespace: {{ .Release.Namespace | quote }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cozystack-migration-hook
  annotations:
    helm.sh/hook: pre-upgrade,pre-install
    helm.sh/hook-weight: "1"
    helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation
{{- end }}

View File

@@ -0,0 +1,6 @@
{{/*
NOTE(review): this template is intentionally disabled — the whole body sits
inside a Helm comment and renders nothing. If re-enabled it would emit every
YAML document found under sources/ in the chart.
*/}}
{{/*
{{- range $path, $_ := .Files.Glob "sources/*.yaml" }}
---
{{ $.Files.Get $path }}
{{- end }}
*/}}

View File

@@ -1,3 +1,2 @@
migrations:
image: ghcr.io/cozystack/cozystack/platform-migrations:latest
targetVersion: 22
assets:
image: ghcr.io/cozystack/cozystack/cozystack-assets:latest@sha256:19b166819d0205293c85d8351a3e038dc4c146b876a8e2ae21dce1d54f0b9e33

View File

@@ -1,6 +1,6 @@
{{- range (split "\n" (.Files.Get "dashboards.list")) }}
{{- $parts := split "/" . }}
{{- if eq (len $parts) 2 }}
{{- if eq (len $parts) 2 }}
---
apiVersion: grafana.integreatly.org/v1beta1
kind: GrafanaDashboard
@@ -11,6 +11,6 @@ spec:
instanceSelector:
matchLabels:
dashboards: grafana
url: http://grafana-dashboards.cozy-grafana-operator.svc/{{ . }}.json
url: http://cozystack-assets.cozy-system.svc/dashboards/{{ . }}.json
{{- end }}
{{- end }}

View File

@@ -13,10 +13,12 @@ spec:
prefix: ""
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: bootbox-rd
namespace: cozy-system
chart:
name: bootbox
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
dashboard:
category: Administration
singular: BootBox

View File

@@ -13,10 +13,12 @@ spec:
prefix: bucket-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: bucket-rd
namespace: cozy-system
chart:
name: bucket
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
singular: Bucket
plural: Buckets

View File

@@ -13,10 +13,12 @@ spec:
prefix: clickhouse-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: clickhouse-rd
namespace: cozy-system
chart:
name: clickhouse
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: ClickHouse

View File

@@ -135,22 +135,29 @@ spec:
If a resource matches the selector in any of the elements in the array, it is
hidden from the user, regardless of the matches in the include array.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
description: |-
CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
A resource matches this selector only if it satisfies ALL criteria:
- Label selector conditions (matchExpressions and matchLabels)
- AND has a name that matches one of the names in resourceNames (if specified)
The resourceNames field supports Go templates with the following variables available:
- {{ .name }}: The name of the managing application (from apps.cozystack.io/application.name)
- {{ .kind }}: The lowercased kind of the managing application (from apps.cozystack.io/application.kind)
- {{ .namespace }}: The namespace of the resource being processed
Example YAML:
secrets:
include:
- matchExpressions:
- key: badlabel
operator: DoesNotExist
matchLabels:
goodlabel: goodvalue
resourceNames:
- "{{ .name }}-secret"
- "{{ .kind }}-{{ .name }}-tls"
- "specificname"
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -210,22 +217,29 @@ spec:
matches none of the selectors in the exclude array that resource is marked
as a tenant resource and is visible to users.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
description: |-
CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
A resource matches this selector only if it satisfies ALL criteria:
- Label selector conditions (matchExpressions and matchLabels)
- AND has a name that matches one of the names in resourceNames (if specified)
The resourceNames field supports Go templates with the following variables available:
- {{ .name }}: The name of the managing application (from apps.cozystack.io/application.name)
- {{ .kind }}: The lowercased kind of the managing application (from apps.cozystack.io/application.kind)
- {{ .namespace }}: The namespace of the resource being processed
Example YAML:
secrets:
include:
- matchExpressions:
- key: badlabel
operator: DoesNotExist
matchLabels:
goodlabel: goodvalue
resourceNames:
- "{{ .name }}-secret"
- "{{ .kind }}-{{ .name }}-tls"
- "specificname"
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -282,29 +296,34 @@ spec:
release:
description: Release configuration
properties:
chartRef:
description: Reference to the chart source
chart:
description: Helm chart configuration
properties:
kind:
default: OCIRepository
description: Kind of the source reference (e.g., OCIRepository,
GitRepository)
type: string
name:
description: Name of the source reference
type: string
namespace:
default: cozy-system
description: Namespace of the source reference
type: string
path:
description: Path within the source artifact where the chart
is located
description: Name of the Helm chart
type: string
sourceRef:
description: Source reference for the Helm chart
properties:
kind:
default: HelmRepository
description: Kind of the source reference
type: string
name:
description: Name of the source reference
type: string
namespace:
default: cozy-public
description: Namespace of the source reference
type: string
required:
- kind
- name
- namespace
type: object
required:
- kind
- name
- namespace
- sourceRef
type: object
labels:
additionalProperties:
@@ -315,7 +334,7 @@ spec:
description: Prefix for the release name
type: string
required:
- chartRef
- chart
- prefix
type: object
secrets:
@@ -327,22 +346,29 @@ spec:
If a resource matches the selector in any of the elements in the array, it is
hidden from the user, regardless of the matches in the include array.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
description: |-
CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
A resource matches this selector only if it satisfies ALL criteria:
- Label selector conditions (matchExpressions and matchLabels)
- AND has a name that matches one of the names in resourceNames (if specified)
The resourceNames field supports Go templates with the following variables available:
- {{ .name }}: The name of the managing application (from apps.cozystack.io/application.name)
- {{ .kind }}: The lowercased kind of the managing application (from apps.cozystack.io/application.kind)
- {{ .namespace }}: The namespace of the resource being processed
Example YAML:
secrets:
include:
- matchExpressions:
- key: badlabel
operator: DoesNotExist
matchLabels:
goodlabel: goodvalue
resourceNames:
- "{{ .name }}-secret"
- "{{ .kind }}-{{ .name }}-tls"
- "specificname"
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -402,22 +428,29 @@ spec:
matches none of the selectors in the exclude array that resource is marked
as a tenant resource and is visible to users.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
description: |-
CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
A resource matches this selector only if it satisfies ALL criteria:
- Label selector conditions (matchExpressions and matchLabels)
- AND has a name that matches one of the names in resourceNames (if specified)
The resourceNames field supports Go templates with the following variables available:
- {{ .name }}: The name of the managing application (from apps.cozystack.io/application.name)
- {{ .kind }}: The lowercased kind of the managing application (from apps.cozystack.io/application.kind)
- {{ .namespace }}: The namespace of the resource being processed
Example YAML:
secrets:
include:
- matchExpressions:
- key: badlabel
operator: DoesNotExist
matchLabels:
goodlabel: goodvalue
resourceNames:
- "{{ .name }}-secret"
- "{{ .kind }}-{{ .name }}-tls"
- "specificname"
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -480,22 +513,29 @@ spec:
If a resource matches the selector in any of the elements in the array, it is
hidden from the user, regardless of the matches in the include array.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
description: |-
CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
A resource matches this selector only if it satisfies ALL criteria:
- Label selector conditions (matchExpressions and matchLabels)
- AND has a name that matches one of the names in resourceNames (if specified)
The resourceNames field supports Go templates with the following variables available:
- {{ .name }}: The name of the managing application (from apps.cozystack.io/application.name)
- {{ .kind }}: The lowercased kind of the managing application (from apps.cozystack.io/application.kind)
- {{ .namespace }}: The namespace of the resource being processed
Example YAML:
secrets:
include:
- matchExpressions:
- key: badlabel
operator: DoesNotExist
matchLabels:
goodlabel: goodvalue
resourceNames:
- "{{ .name }}-secret"
- "{{ .kind }}-{{ .name }}-tls"
- "specificname"
properties:
matchExpressions:
description: matchExpressions is a list of label selector
@@ -555,22 +595,29 @@ spec:
matches none of the selectors in the exclude array that resource is marked
as a tenant resource and is visible to users.
items:
description: "CozystackResourceDefinitionResourceSelector extends
metav1.LabelSelector with resourceNames support.\nA resource
matches this selector only if it satisfies ALL criteria:\n-
Label selector conditions (matchExpressions and matchLabels)\n-
AND has a name that matches one of the names in resourceNames
(if specified)\n\nThe resourceNames field supports Go templates
with the following variables available:\n- {{ .name }}: The
name of the managing application (from apps.cozystack.io/application.name)\n-
{{ .kind }}: The lowercased kind of the managing application
(from apps.cozystack.io/application.kind)\n- {{ .namespace
}}: The namespace of the resource being processed\n\nExample
YAML:\n\n\tsecrets:\n\t include:\n\t - matchExpressions:\n\t
\ - key: badlabel\n\t operator: DoesNotExist\n\t matchLabels:\n\t
\ goodlabel: goodvalue\n\t resourceNames:\n\t -
\"{{ .name }}-secret\"\n\t - \"{{ .kind }}-{{ .name }}-tls\"\n\t
\ - \"specificname\""
description: |-
CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
A resource matches this selector only if it satisfies ALL criteria:
- Label selector conditions (matchExpressions and matchLabels)
- AND has a name that matches one of the names in resourceNames (if specified)
The resourceNames field supports Go templates with the following variables available:
- {{ .name }}: The name of the managing application (from apps.cozystack.io/application.name)
- {{ .kind }}: The lowercased kind of the managing application (from apps.cozystack.io/application.kind)
- {{ .namespace }}: The namespace of the resource being processed
Example YAML:
secrets:
include:
- matchExpressions:
- key: badlabel
operator: DoesNotExist
matchLabels:
goodlabel: goodvalue
resourceNames:
- "{{ .name }}-secret"
- "{{ .kind }}-{{ .name }}-tls"
- "specificname"
properties:
matchExpressions:
description: matchExpressions is a list of label selector

View File

@@ -14,10 +14,12 @@ spec:
labels:
cozystack.io/ui: "true"
internal.cozystack.io/tenantmodule: "true"
chartRef:
kind: OCIRepository
name: etcd-rd
namespace: cozy-system
chart:
name: etcd
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
dashboard:
category: Administration
singular: Etcd

View File

@@ -13,10 +13,12 @@ spec:
prefix: ferretdb-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: ferretdb-rd
namespace: cozy-system
chart:
name: ferretdb
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: FerretDB

View File

@@ -13,10 +13,12 @@ spec:
prefix: foundationdb-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: foundationdb-rd
namespace: cozy-system
chart:
name: foundationdb
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: FoundationDB

View File

@@ -1,7 +1,6 @@
export NAME=grafana-operator
export NAMESPACE=cozy-grafana-operator
include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
update:
@@ -9,14 +8,3 @@ update:
mkdir -p charts
curl -sSL https://github.com/grafana-operator/grafana-operator/archive/refs/heads/master.tar.gz | \
tar xzvf - --strip 3 -C charts grafana-operator-master/deploy/helm/grafana-operator
image:
docker buildx build --file images/grafana-dashboards/Dockerfile ../../.. \
--tag $(REGISTRY)/grafana-dashboards:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/grafana-dashboards:latest \
--cache-to type=inline \
--metadata-file images/grafana-dashboards.json \
$(BUILDX_ARGS)
echo "$(REGISTRY)/grafana-dashboards:$(call settag,$(TAG))@$$(yq --exit-status '.["containerimage.digest"]' images/grafana-dashboards.json --output-format json --raw-output)" \
> images/grafana-dashboards.tag
rm -f images/grafana-dashboards.json

View File

@@ -1 +0,0 @@
ghcr.io/cozystack/cozystack/grafana-dashboards:latest

View File

@@ -1,11 +0,0 @@
FROM alpine:3.22
RUN apk add --no-cache darkhttpd
COPY dashboards /var/www/dashboards
WORKDIR /var/www
EXPOSE 8080
CMD ["darkhttpd", "/var/www/dashboards", "--port", "8080", "--addr", "0.0.0.0"]

View File

@@ -1,3 +0,0 @@
# Exclude everything except dashboards directory
*
!dashboards/**

View File

@@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana-dashboards
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: grafana-dashboards
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: dashboards
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: grafana-dashboards
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: grafana-dashboards
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: dashboards
spec:
containers:
- name: dashboards
image: {{ $.Files.Get "images/grafana-dashboards.tag" | trim }}
ports:
- containerPort: 8080
name: http
protocol: TCP
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 10
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 5
periodSeconds: 5

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: grafana-dashboards
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: grafana-dashboards
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: dashboards
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/name: grafana-dashboards
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -13,10 +13,12 @@ spec:
prefix: http-cache-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: http-cache-rd
namespace: cozy-system
chart:
name: http-cache
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: NaaS
singular: HTTP Cache

View File

@@ -14,10 +14,12 @@ spec:
labels:
cozystack.io/ui: "true"
internal.cozystack.io/tenantmodule: "true"
chartRef:
kind: OCIRepository
name: info-rd
namespace: cozy-system
chart:
name: info
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
dashboard:
name: info
category: Administration

View File

@@ -14,10 +14,12 @@ spec:
labels:
cozystack.io/ui: "true"
internal.cozystack.io/tenantmodule: "true"
chartRef:
kind: OCIRepository
name: ingress-rd
namespace: cozy-system
chart:
name: ingress
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
dashboard:
category: Administration
singular: Ingress

View File

@@ -13,10 +13,12 @@ spec:
prefix: kafka-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: kafka-rd
namespace: cozy-system
chart:
name: kafka
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: Kafka

View File

@@ -13,10 +13,12 @@ spec:
prefix: kubernetes-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: kubernetes-rd
namespace: cozy-system
chart:
name: kubernetes
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: IaaS
singular: Kubernetes

View File

@@ -1,23 +1,4 @@
export NAME=linstor
export NAMESPACE=cozy-$(NAME)
include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
LINSTOR_VERSION ?= 1.32.3
image:
docker buildx build images/piraeus-server \
--build-arg LINSTOR_VERSION=$(LINSTOR_VERSION) \
--build-arg K8S_AWAIT_ELECTION_VERSION=v0.4.2 \
--tag $(REGISTRY)/piraeus-server:$(call settag,$(LINSTOR_VERSION)) \
--tag $(REGISTRY)/piraeus-server:$(call settag,$(LINSTOR_VERSION)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/piraeus-server:latest \
--cache-to type=inline \
--metadata-file images/piraeus-server.json \
$(BUILDX_ARGS)
REPOSITORY="$(REGISTRY)/piraeus-server" \
yq -i '.piraeusServer.image.repository = strenv(REPOSITORY)' values.yaml
TAG="$(call settag,$(LINSTOR_VERSION))@$$(yq e '."containerimage.digest"' images/piraeus-server.json -o json -r)" \
yq -i '.piraeusServer.image.tag = strenv(TAG)' values.yaml
rm -f images/piraeus-server.json

View File

@@ -10,125 +10,10 @@ trap terminate SIGINT SIGQUIT SIGTERM
echo "Starting Linstor per-satellite plunger"
INTERVAL_SEC="${INTERVAL_SEC:-30}"
STALL_ITERS="${STALL_ITERS:-4}"
STATE_FILE="${STATE_FILE:-/run/drbd-sync-watch.state}"
log() { printf '%s %s\n' "$(date -Is)" "$*" >&2; }
drbd_status_json() {
drbdsetup status --json 2>/dev/null || true
}
# Detect DRBD resources where resync is stuck:
# - at least one local device is Inconsistent
# - there is an active SyncTarget peer
# - there are other peers suspended with resync-suspended:dependency
# Output format: "<resource> <sync-peer> <percent-in-sync>"
drbd_stall_candidates() {
jq -r '
.[]?
| . as $r
| select(any($r.devices[]?; ."disk-state" == "Inconsistent"))
| (
[ $r.connections[]?
| . as $c
| $c.peer_devices[]?
| select(."replication-state" == "SyncTarget")
| { peer: $c.name, pct: (."percent-in-sync" // empty) }
] | .[0]?
) as $sync
| select($sync != null and ($sync.pct|tostring) != "")
| select(any($r.connections[]?.peer_devices[]?; ."resync-suspended" == "dependency"))
| "\($r.name) \($sync.peer) \($sync.pct)"
'
}
drbd_stall_load_state() {
[ -f "$STATE_FILE" ] && cat "$STATE_FILE" || true
}
drbd_stall_save_state() {
local tmp="${STATE_FILE}.tmp"
cat >"$tmp"
mv "$tmp" "$STATE_FILE"
}
# Break stalled resync by disconnecting the current SyncTarget peer.
# After reconnect, DRBD will typically pick another eligible peer and continue syncing.
drbd_stall_act() {
local res="$1"
local peer="$2"
local pct="$3"
log "STALL detected: res=$res sync_peer=$peer percent_in_sync=$pct -> disconnect/connect"
drbdadm disconnect "${res}:${peer}" && drbdadm connect "$res" || log "WARN: action failed for ${res}:${peer}"
}
# Track percent-in-sync progress across iterations.
# If progress does not change for STALL_ITERS loops, trigger reconnect.
drbd_fix_stalled_sync() {
local now prev json out
now="$(date +%s)"
prev="$(drbd_stall_load_state)"
json="$(drbd_status_json)"
[ -n "$json" ] || return 0
out="$(printf '%s' "$json" | drbd_stall_candidates)"
local new_state=""
local acts=""
while IFS= read -r line; do
[ -n "$line" ] || continue
set -- $line
local res="$1" peer="$2" pct="$3"
local key="${res} ${peer}"
local prev_line
prev_line="$(printf '%s\n' "$prev" | awk -v k="$key" '$1" "$2==k {print; exit}')"
local cnt last_act prev_pct prev_cnt prev_act
if [ -n "$prev_line" ]; then
set -- $prev_line
prev_pct="$3"
prev_cnt="$4"
prev_act="$5"
if [ "$pct" = "$prev_pct" ]; then
cnt=$((prev_cnt + 1))
else
cnt=1
fi
last_act="$prev_act"
else
cnt=1
last_act=0
fi
if [ "$cnt" -ge "$STALL_ITERS" ]; then
acts="${acts}${res} ${peer} ${pct}"$'\n'
cnt=0
last_act="$now"
fi
new_state="${new_state}${res} ${peer} ${pct} ${cnt} ${last_act}"$'\n'
done <<< "$out"
if [ -n "$acts" ]; then
while IFS= read -r a; do
[ -n "$a" ] || continue
set -- $a
drbd_stall_act "$1" "$2" "$3"
done <<< "$acts"
fi
printf '%s' "$new_state" | drbd_stall_save_state
}
while true; do
# timeout at the start of the loop to give a chance for the fresh linstor-satellite instance to cleanup itself
sleep "$INTERVAL_SEC" &
sleep 30 &
pid=$!
wait $pid
@@ -136,7 +21,7 @@ while true; do
# the `/` path could not be a backing file for a loop device, so it's a good indicator of a stuck loop device
# TODO describe the issue in more detail
# Using the direct /usr/sbin/losetup as the linstor-satellite image has own wrapper in /usr/local
stale_loopbacks=$(/usr/sbin/losetup --json | jq -r '.[][] | select(."back-file" == "/" or ."back-file" == "/ (deleted)").name')
stale_loopbacks=$(/usr/sbin/losetup --json | jq -r '.[][] | select(."back-file" == "/" or ."back-file" == "/ (deleted)").name' )
for stale_device in $stale_loopbacks; do (
echo "Detaching stuck loop device ${stale_device}"
set -x
@@ -154,7 +39,4 @@ while true; do
drbdadm up "${secondary}" || echo "Command failed"
); done
# Detect and fix stalled DRBD resync by switching SyncTarget peer
drbd_fix_stalled_sync || true
done

View File

@@ -1,172 +0,0 @@
ARG DISTRO=bookworm
ARG LINSTOR_VERSION
# ------------------------------------------------------------------------------
# Build linstor-server from source
FROM debian:bookworm AS builder
ARG LINSTOR_VERSION
ARG VERSION=${LINSTOR_VERSION}
ARG DISTRO
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
&& apt-get -y upgrade \
&& apt-get -y install build-essential git default-jdk-headless python3-all debhelper wget unzip && \
wget https://services.gradle.org/distributions/gradle-8.9-bin.zip -O /tmp/gradle.zip && \
unzip -d /opt /tmp/gradle.zip && \
rm /tmp/gradle.zip && \
ln -s /opt/gradle-8.9/bin/gradle /usr/local/bin/gradle
RUN git clone https://github.com/LINBIT/linstor-server.git /linstor-server
WORKDIR /linstor-server
RUN git checkout v${VERSION}
# Apply patches
COPY patches /patches
RUN git apply /patches/*.diff && \
git config user.email "build@cozystack.io" && \
git config user.name "Cozystack Builder" && \
git add -A && \
git commit -m "Apply patches"
# Initialize git submodules
RUN git submodule update --init --recursive || make check-submods
# Pre-download ALL dependencies before make tarball
# This ensures all transitive dependencies are cached, including optional ones like AWS SDK
RUN ./gradlew getProtoc
RUN ./gradlew generateJava
RUN ./gradlew --no-daemon --gradle-user-home .gradlehome downloadDependencies
# Manually create tarball without removing caches
# make tarball removes .gradlehome/caches/[0-9]* which deletes dependencies
# So we'll do the steps manually but keep the caches
RUN make check-submods versioninfo gen-java FORCE=1 VERSION=${VERSION}
RUN make server/jar.deps controller/jar.deps satellite/jar.deps jclcrypto/jar.deps FORCE=1 VERSION=${VERSION}
RUN make .filelist FORCE=1 VERSION=${VERSION} PRESERVE_DEBIAN=1
# Don't remove caches - we need them for offline build
RUN rm -Rf .gradlehome/wrapper .gradlehome/native .gradlehome/.tmp || true
RUN mkdir -p ./libs
RUN make tgz VERSION=${VERSION}
# Extract tarball and build DEB packages from it
RUN mv linstor-server-${VERSION}.tar.gz /linstor-server_${VERSION}.orig.tar.gz \
&& tar -C / -xvf /linstor-server_${VERSION}.orig.tar.gz
WORKDIR /linstor-server-${VERSION}
# Verify .gradlehome is present in extracted tarball
RUN test -d .gradlehome && echo ".gradlehome found in tarball" || (echo ".gradlehome not found in tarball!" && exit 1)
# Build DEB packages from tarball
# Override GRADLE_FLAGS to remove --offline flag, allowing Gradle to download missing dependencies
RUN sed -i 's/GRADLE_FLAGS = --offline/GRADLE_FLAGS =/' debian/rules || true
RUN LD_LIBRARY_PATH='' dpkg-buildpackage -rfakeroot -b -uc
# Copy built .deb packages to a location accessible from final image
# dpkg-buildpackage creates packages in parent directory
RUN mkdir -p /packages-output && \
find .. -maxdepth 1 -name "linstor-*.deb" -exec cp {} /packages-output/ \; && \
test -n "$(ls -A /packages-output)" || (echo "ERROR: No linstor .deb packages found after build." && exit 1)
# ------------------------------------------------------------------------------
# Final image
FROM debian:${DISTRO}
LABEL maintainer="Roland Kammerer <roland.kammerer@linbit.com>"
ARG LINSTOR_VERSION
ARG DISTRO
# Copy built .deb packages from builder stage
# dpkg-buildpackage creates packages in parent directory, we copied them to /packages-output
COPY --from=builder /packages-output/ /packages/
RUN { echo 'APT::Install-Recommends "false";' ; echo 'APT::Install-Suggests "false";' ; } > /etc/apt/apt.conf.d/99_piraeus
RUN --mount=type=cache,target=/var/cache,sharing=private \
--mount=type=cache,target=/var/lib/apt/lists,sharing=private \
--mount=type=tmpfs,target=/var/log \
# Install wget first for downloading keyring
apt-get update && apt-get install -y wget ca-certificates && \
# Enable contrib repos for zfsutils \
. /etc/os-release && \
sed -i -r 's/^Components: (.*)$/Components: \1 contrib/' /etc/apt/sources.list.d/debian.sources && \
echo "deb http://deb.debian.org/debian $VERSION_CODENAME-backports contrib" > /etc/apt/sources.list.d/backports.list && \
wget https://packages.linbit.com/public/linbit-keyring.deb -O /var/cache/linbit-keyring.deb && \
dpkg -i /var/cache/linbit-keyring.deb && \
echo "deb http://packages.linbit.com/public $VERSION_CODENAME misc" > /etc/apt/sources.list.d/linbit.list && \
apt-get update && \
# Install useful utilities and general dependencies
apt-get install -y udev drbd-utils jq net-tools iputils-ping iproute2 dnsutils netcat-traditional sysstat curl util-linux && \
# Install dependencies for optional features \
apt-get install -y \
# cryptsetup: luks layer
cryptsetup \
# e2fsprogs: LINSTOR can create file systems \
e2fsprogs \
# lsscsi: exos layer \
lsscsi \
# lvm2: manage lvm storage pools \
lvm2 \
# multipath-tools: exos layer \
multipath-tools \
# nvme-cli: nvme layer
nvme-cli \
# procps: used by LINSTOR to find orphaned send/receive processes \
procps \
# socat: used with thin-send-recv to send snapshots to another LINSTOR cluster
socat \
# thin-send-recv: used to send/receive snapshots of LVM thin volumes \
thin-send-recv \
# xfsprogs: LINSTOR can create file systems; xfs deps \
xfsprogs \
# zstd: used with thin-send-recv to send snapshots to another LINSTOR cluster \
zstd \
# zfsutils-linux: for zfs storage pools \
zfsutils-linux/$VERSION_CODENAME-backports \
&& \
# remove udev, no need for it in the container \
apt-get remove -y udev && \
# Install linstor packages from built .deb files and linstor-client from repository
apt-get install -y default-jre-headless python3-all python3-natsort linstor-client \
&& ls packages/*.deb >/dev/null && (dpkg -i packages/*.deb || apt-get install -f -y) \
&& rm -rf /packages \
&& sed -i 's/"-Djdk.tls.acknowledgeCloseNotify=true"//g' /usr/share/linstor-server/bin/Controller \
&& apt-get clean
# Log directory need to be group writable. OpenShift assigns random UID and GID, without extra RBAC changes we can only influence the GID.
RUN mkdir /var/log/linstor-controller && \
chown 0:1000 /var/log/linstor-controller && \
chmod -R 0775 /var/log/linstor-controller && \
# Ensure we log to files in containers, otherwise SOS reports won't show any logs at all
sed -i 's#<!-- <appender-ref ref="FILE" /> -->#<appender-ref ref="FILE" />#' /usr/share/linstor-server/lib/conf/logback.xml
RUN lvmconfig --type current --mergedconfig --config 'activation { udev_sync = 0 udev_rules = 0 monitoring = 0 } devices { global_filter = [ "r|^/dev/drbd|" ] obtain_device_list_from_udev = 0}' > /etc/lvm/lvm.conf.new && mv /etc/lvm/lvm.conf.new /etc/lvm/lvm.conf
RUN echo 'global { usage-count no; }' > /etc/drbd.d/global_common.conf
# controller
EXPOSE 3376/tcp 3377/tcp 3370/tcp 3371/tcp
# satellite
EXPOSE 3366/tcp 3367/tcp
RUN wget https://raw.githubusercontent.com/piraeusdatastore/piraeus/refs/heads/master/dockerfiles/piraeus-server/entry.sh -O /usr/bin/piraeus-entry.sh \
&& chmod +x /usr/bin/piraeus-entry.sh
ARG K8S_AWAIT_ELECTION_VERSION=v0.4.2
# TARGETARCH is a docker special variable: https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
ARG TARGETARCH
RUN wget https://github.com/LINBIT/k8s-await-election/releases/download/${K8S_AWAIT_ELECTION_VERSION}/k8s-await-election-${K8S_AWAIT_ELECTION_VERSION}-linux-${TARGETARCH}.tar.gz -O - | tar -xvz -C /usr/bin/
ARG LOSETUP_CONTAINER_VERSION=v1.0.1
RUN wget "https://github.com/LINBIT/losetup-container/releases/download/${LOSETUP_CONTAINER_VERSION}/losetup-container-$(uname -m)-unknown-linux-gnu.tar.gz" -O - | tar -xvz -C /usr/local/sbin && \
printf '#!/bin/sh\nLOSETUP_CONTAINER_ORIGINAL_LOSETUP=%s exec /usr/local/sbin/losetup-container "$@"\n' $(command -v losetup) > /usr/local/sbin/losetup && \
chmod +x /usr/local/sbin/losetup
RUN wget "https://dl.k8s.io/$(wget -O - https://dl.k8s.io/release/stable.txt)/bin/linux/${TARGETARCH}/kubectl" -O /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl
CMD ["startSatellite"]
ENTRYPOINT ["/usr/bin/k8s-await-election", "/usr/bin/piraeus-entry.sh"]

View File

@@ -1,12 +0,0 @@
# LINSTOR Server Patches
Custom patches for piraeus-server (linstor-server) v1.32.3.
- **adjust-on-resfile-change.diff** — Use actual device path in res file during toggle-disk; fix LUKS data offset
- Upstream: [#473](https://github.com/LINBIT/linstor-server/pull/473), [#472](https://github.com/LINBIT/linstor-server/pull/472)
- **allow-toggle-disk-retry.diff** — Allow retry and cancellation of failed toggle-disk operations
- Upstream: [#475](https://github.com/LINBIT/linstor-server/pull/475)
- **force-metadata-check-on-disk-add.diff** — Create metadata during toggle-disk from diskless to diskful
- Upstream: [#474](https://github.com/LINBIT/linstor-server/pull/474)
- **skip-adjust-when-device-inaccessible.diff** — Skip DRBD adjust/res file regeneration when child layer device is inaccessible
- Upstream: [#471](https://github.com/LINBIT/linstor-server/pull/471)

View File

@@ -1,48 +0,0 @@
diff --git a/satellite/src/main/java/com/linbit/linstor/layer/drbd/utils/ConfFileBuilder.java b/satellite/src/main/java/com/linbit/linstor/layer/drbd/utils/ConfFileBuilder.java
index 36c52ccf8..c0bb7b967 100644
--- a/satellite/src/main/java/com/linbit/linstor/layer/drbd/utils/ConfFileBuilder.java
+++ b/satellite/src/main/java/com/linbit/linstor/layer/drbd/utils/ConfFileBuilder.java
@@ -894,12 +894,16 @@ public class ConfFileBuilder
if (((Volume) vlmData.getVolume()).getFlags().isUnset(localAccCtx, Volume.Flags.DELETE))
{
final String disk;
+ // Check if we're in toggle-disk operation (adding disk to diskless resource)
+ boolean isDiskAdding = vlmData.getVolume().getAbsResource().getStateFlags().isSomeSet(
+ localAccCtx, Resource.Flags.DISK_ADD_REQUESTED, Resource.Flags.DISK_ADDING);
if ((!isPeerRsc && vlmData.getDataDevice() == null) ||
(isPeerRsc &&
// FIXME: vlmData.getRscLayerObject().getFlags should be used here
vlmData.getVolume().getAbsResource().disklessForDrbdPeers(accCtx)
) ||
- (!isPeerRsc &&
+ // For toggle-disk: if dataDevice is set and we're adding disk, use the actual device
+ (!isPeerRsc && !isDiskAdding &&
// FIXME: vlmData.getRscLayerObject().getFlags should be used here
vlmData.getVolume().getAbsResource().isDrbdDiskless(accCtx)
)
diff --git a/satellite/src/main/java/com/linbit/linstor/layer/luks/CryptSetupCommands.java b/satellite/src/main/java/com/linbit/linstor/layer/luks/CryptSetupCommands.java
index 54dd5c19f..018de58cf 100644
--- a/satellite/src/main/java/com/linbit/linstor/layer/luks/CryptSetupCommands.java
+++ b/satellite/src/main/java/com/linbit/linstor/layer/luks/CryptSetupCommands.java
@@ -34,6 +34,9 @@ public class CryptSetupCommands implements Luks
private static final Version V2_1_0 = new Version(2, 1, 0);
private static final Version V2_0_0 = new Version(2, 0, 0);
private static final String PBDKF_MAX_MEMORY_KIB = "262144"; // 256 MiB
+ // Fixed LUKS2 data offset in 512-byte sectors (16 MiB = 32768 sectors)
+ // This ensures consistent LUKS header size across all nodes regardless of system defaults
+ private static final String LUKS2_DATA_OFFSET_SECTORS = "32768";
@SuppressWarnings("unused")
private final ErrorReporter errorReporter;
@@ -78,6 +81,11 @@ public class CryptSetupCommands implements Luks
command.add(CRYPTSETUP);
command.add("-q");
command.add("luksFormat");
+ // Always specify explicit offset to ensure consistent LUKS header size across all nodes
+ // Without this, different systems may create LUKS with different header sizes (16MiB vs 32MiB)
+ // which causes "Low.dev. smaller than requested DRBD-dev. size" errors during toggle-disk
+ command.add("--offset");
+ command.add(LUKS2_DATA_OFFSET_SECTORS);
if (version.greaterOrEqual(V2_0_0))
{
command.add("--pbkdf-memory");

View File

@@ -1,235 +0,0 @@
diff --git a/controller/src/main/java/com/linbit/linstor/core/apicallhandler/controller/CtrlRscToggleDiskApiCallHandler.java b/controller/src/main/java/com/linbit/linstor/core/apicallhandler/controller/CtrlRscToggleDiskApiCallHandler.java
index 1a6f7b7f0..bd447e049 100644
--- a/controller/src/main/java/com/linbit/linstor/core/apicallhandler/controller/CtrlRscToggleDiskApiCallHandler.java
+++ b/controller/src/main/java/com/linbit/linstor/core/apicallhandler/controller/CtrlRscToggleDiskApiCallHandler.java
@@ -58,7 +58,9 @@ import com.linbit.linstor.stateflags.StateFlags;
import com.linbit.linstor.storage.StorageException;
import com.linbit.linstor.storage.data.adapter.drbd.DrbdRscData;
import com.linbit.linstor.storage.interfaces.categories.resource.AbsRscLayerObject;
+import com.linbit.linstor.storage.interfaces.categories.resource.VlmProviderObject;
import com.linbit.linstor.storage.kinds.DeviceLayerKind;
+import com.linbit.linstor.storage.kinds.DeviceProviderKind;
import com.linbit.linstor.storage.utils.LayerUtils;
import com.linbit.linstor.tasks.AutoDiskfulTask;
import com.linbit.linstor.utils.layer.LayerRscUtils;
@@ -317,21 +319,90 @@ public class CtrlRscToggleDiskApiCallHandler implements CtrlSatelliteConnectionL
Resource rsc = ctrlApiDataLoader.loadRsc(nodeName, rscName, true);
+ // Allow retry of the same operation if the previous attempt failed
+ // (the requested flag remains set for retry on reconnection, but we should also allow manual retry)
+ // Also allow cancellation of a failed operation by requesting the opposite operation
if (hasDiskAddRequested(rsc))
{
- throw new ApiRcException(ApiCallRcImpl.simpleEntry(
- ApiConsts.FAIL_RSC_BUSY,
- "Addition of disk to resource already requested",
- true
- ));
+ if (removeDisk)
+ {
+ // User wants to cancel the failed add-disk operation and go back to diskless
+ // Use the existing disk removal flow to properly cleanup storage on satellite
+ errorReporter.logInfo(
+ "Toggle Disk cancel on %s/%s - cancelling failed DISK_ADD_REQUESTED, reverting to diskless",
+ nodeNameStr, rscNameStr);
+ unmarkDiskAddRequested(rsc);
+ // Also clear DISK_ADDING if it was set
+ unmarkDiskAdding(rsc);
+
+ // Set storage pool to diskless pool (overwrite the diskful pool that was set)
+ Props rscProps = ctrlPropsHelper.getProps(rsc);
+ rscProps.map().put(ApiConsts.KEY_STOR_POOL_NAME, LinStor.DISKLESS_STOR_POOL_NAME);
+
+ // Set DISK_REMOVE_REQUESTED to use the existing disk removal flow
+ // This will:
+ // 1. updateAndAdjustDisk sets DISK_REMOVING flag
+ // 2. Satellite sees DISK_REMOVING and deletes LUKS/storage devices
+ // 3. finishOperation rebuilds layer stack as diskless
+ // We keep the existing layer data so satellite can properly cleanup
+ markDiskRemoveRequested(rsc);
+
+ ctrlTransactionHelper.commit();
+
+ // Use existing disk removal flow - this will properly cleanup storage on satellite
+ return Flux
+ .<ApiCallRc>just(ApiCallRcImpl.singleApiCallRc(
+ ApiConsts.MODIFIED,
+ "Cancelling disk addition, reverting to diskless"
+ ))
+ .concatWith(updateAndAdjustDisk(nodeName, rscName, true, toggleIntoTiebreakerRef, context))
+ .concatWith(ctrlRscDfnApiCallHandler.get().updateProps(rsc.getResourceDefinition()));
+ }
+ // If adding disk and DISK_ADD_REQUESTED is already set, treat as retry
+ // First clean up partially created storage by removing and recreating layer data
+ errorReporter.logInfo(
+ "Toggle Disk retry on %s/%s - DISK_ADD_REQUESTED already set, cleaning up and retrying",
+ nodeNameStr, rscNameStr);
+
+ // Remove old layer data and recreate to ensure clean state
+ // This forces satellite to delete any partially created storage and start fresh
+ LayerPayload payload = new LayerPayload();
+ copyDrbdNodeIdIfExists(rsc, payload);
+ List<DeviceLayerKind> layerList = removeLayerData(rsc);
+ ctrlLayerStackHelper.ensureStackDataExists(rsc, layerList, payload);
+
+ ctrlTransactionHelper.commit();
+ return Flux
+ .<ApiCallRc>just(new ApiCallRcImpl())
+ .concatWith(updateAndAdjustDisk(nodeName, rscName, false, toggleIntoTiebreakerRef, context))
+ .concatWith(ctrlRscDfnApiCallHandler.get().updateProps(rsc.getResourceDefinition()));
}
if (hasDiskRemoveRequested(rsc))
{
- throw new ApiRcException(ApiCallRcImpl.simpleEntry(
- ApiConsts.FAIL_RSC_BUSY,
- "Removal of disk from resource already requested",
- true
- ));
+ if (!removeDisk)
+ {
+ // User wants to cancel the failed remove-disk operation
+ errorReporter.logInfo(
+ "Toggle Disk cancel on %s/%s - cancelling failed DISK_REMOVE_REQUESTED",
+ nodeNameStr, rscNameStr);
+ unmarkDiskRemoveRequested(rsc);
+ ctrlTransactionHelper.commit();
+ return Flux.<ApiCallRc>just(
+ ApiCallRcImpl.singleApiCallRc(
+ ApiConsts.MODIFIED,
+ "Cancelled disk removal request"
+ )
+ );
+ }
+ // If removing disk and DISK_REMOVE_REQUESTED is already set, treat as retry
+ errorReporter.logInfo(
+ "Toggle Disk retry on %s/%s - DISK_REMOVE_REQUESTED already set, continuing operation",
+ nodeNameStr, rscNameStr);
+ ctrlTransactionHelper.commit();
+ return Flux
+ .<ApiCallRc>just(new ApiCallRcImpl())
+ .concatWith(updateAndAdjustDisk(nodeName, rscName, true, toggleIntoTiebreakerRef, context))
+ .concatWith(ctrlRscDfnApiCallHandler.get().updateProps(rsc.getResourceDefinition()));
}
if (!removeDisk && !ctrlVlmCrtApiHelper.isDiskless(rsc))
@@ -342,17 +413,43 @@ public class CtrlRscToggleDiskApiCallHandler implements CtrlSatelliteConnectionL
true
));
}
+ ResourceDefinition rscDfn = rsc.getResourceDefinition();
+ AccessContext peerCtx = peerAccCtx.get();
+
if (removeDisk && ctrlVlmCrtApiHelper.isDiskless(rsc))
{
+ // Resource is marked as diskless - check if it has orphaned storage layers that need cleanup
+ AbsRscLayerObject<Resource> layerData = getLayerData(peerCtx, rsc);
+ if (layerData != null && (LayerUtils.hasLayer(layerData, DeviceLayerKind.LUKS) ||
+ hasNonDisklessStorageLayer(layerData)))
+ {
+ // Resource is marked as diskless but has orphaned storage layers - need cleanup
+ // Use the existing disk removal flow to properly cleanup storage on satellite
+ errorReporter.logInfo(
+ "Toggle Disk cleanup on %s/%s - resource is diskless but has orphaned storage layers, cleaning up",
+ nodeNameStr, rscNameStr);
+
+ // Set DISK_REMOVE_REQUESTED to use the existing disk removal flow
+ // This will trigger proper satellite cleanup via DISK_REMOVING flag
+ markDiskRemoveRequested(rsc);
+
+ ctrlTransactionHelper.commit();
+
+ // Use existing disk removal flow - this will properly cleanup storage on satellite
+ return Flux
+ .<ApiCallRc>just(ApiCallRcImpl.singleApiCallRc(
+ ApiConsts.MODIFIED,
+ "Cleaning up orphaned storage layers"
+ ))
+ .concatWith(updateAndAdjustDisk(nodeName, rscName, true, toggleIntoTiebreakerRef, context))
+ .concatWith(ctrlRscDfnApiCallHandler.get().updateProps(rsc.getResourceDefinition()));
+ }
throw new ApiRcException(ApiCallRcImpl.simpleEntry(
ApiConsts.WARN_RSC_ALREADY_DISKLESS,
"Resource already diskless",
true
));
}
-
- ResourceDefinition rscDfn = rsc.getResourceDefinition();
- AccessContext peerCtx = peerAccCtx.get();
if (removeDisk)
{
// Prevent removal of the last disk
@@ -1324,6 +1421,30 @@ public class CtrlRscToggleDiskApiCallHandler implements CtrlSatelliteConnectionL
}
}
+ private void unmarkDiskAddRequested(Resource rsc)
+ {
+ try
+ {
+ rsc.getStateFlags().disableFlags(apiCtx, Resource.Flags.DISK_ADD_REQUESTED);
+ }
+ catch (AccessDeniedException | DatabaseException exc)
+ {
+ throw new ImplementationError(exc);
+ }
+ }
+
+ private void unmarkDiskRemoveRequested(Resource rsc)
+ {
+ try
+ {
+ rsc.getStateFlags().disableFlags(apiCtx, Resource.Flags.DISK_REMOVE_REQUESTED);
+ }
+ catch (AccessDeniedException | DatabaseException exc)
+ {
+ throw new ImplementationError(exc);
+ }
+ }
+
private void markDiskAdded(Resource rscData)
{
try
@@ -1389,6 +1510,41 @@ public class CtrlRscToggleDiskApiCallHandler implements CtrlSatelliteConnectionL
return layerData;
}
+ /**
+ * Check if the layer stack has a non-diskless STORAGE layer.
+ * This is used to detect orphaned storage layers that need cleanup.
+ */
+ private boolean hasNonDisklessStorageLayer(AbsRscLayerObject<Resource> layerDataRef)
+ {
+ boolean hasNonDiskless = false;
+ if (layerDataRef != null)
+ {
+ if (layerDataRef.getLayerKind() == DeviceLayerKind.STORAGE)
+ {
+ for (VlmProviderObject<Resource> vlmData : layerDataRef.getVlmLayerObjects().values())
+ {
+ if (vlmData.getProviderKind() != DeviceProviderKind.DISKLESS)
+ {
+ hasNonDiskless = true;
+ break;
+ }
+ }
+ }
+ if (!hasNonDiskless)
+ {
+ for (AbsRscLayerObject<Resource> child : layerDataRef.getChildren())
+ {
+ if (hasNonDisklessStorageLayer(child))
+ {
+ hasNonDiskless = true;
+ break;
+ }
+ }
+ }
+ }
+ return hasNonDiskless;
+ }
+
private LockGuard createLockGuard()
{
return lockGuardFactory.buildDeferred(LockType.WRITE, LockObj.NODES_MAP, LockObj.RSC_DFN_MAP);

View File

@@ -1,63 +0,0 @@
diff --git a/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java b/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java
index a302ee835..01967a31f 100644
--- a/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java
+++ b/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java
@@ -371,10 +371,13 @@ public class DrbdLayer implements DeviceLayer
boolean isDiskless = drbdRscData.getAbsResource().isDrbdDiskless(workerCtx);
StateFlags<Flags> rscFlags = drbdRscData.getAbsResource().getStateFlags();
boolean isDiskRemoving = rscFlags.isSet(workerCtx, Resource.Flags.DISK_REMOVING);
+ // Check if we're in toggle-disk operation (adding disk to diskless resource)
+ boolean isDiskAdding = rscFlags.isSomeSet(workerCtx, Resource.Flags.DISK_ADD_REQUESTED, Resource.Flags.DISK_ADDING);
boolean contProcess = isDiskless;
- boolean processChildren = !isDiskless || isDiskRemoving;
+ // Process children when: has disk, removing disk, OR adding disk (toggle-disk)
+ boolean processChildren = !isDiskless || isDiskRemoving || isDiskAdding;
// do not process children when ONLY DRBD_DELETE flag is set (DELETE flag is still unset)
processChildren &= (!rscFlags.isSet(workerCtx, Resource.Flags.DRBD_DELETE) ||
rscFlags.isSet(workerCtx, Resource.Flags.DELETE));
@@ -570,7 +573,11 @@ public class DrbdLayer implements DeviceLayer
{
// hasMetaData needs to be run after child-resource processed
List<DrbdVlmData<Resource>> createMetaData = new ArrayList<>();
- if (!drbdRscData.getAbsResource().isDrbdDiskless(workerCtx) && !skipDisk)
+ // Check if we're in toggle-disk operation (adding disk to diskless resource)
+ boolean isDiskAddingForMd = drbdRscData.getAbsResource().getStateFlags()
+ .isSomeSet(workerCtx, Resource.Flags.DISK_ADD_REQUESTED, Resource.Flags.DISK_ADDING);
+ // Create metadata when: has disk OR adding disk (toggle-disk), and skipDisk is disabled
+ if ((!drbdRscData.getAbsResource().isDrbdDiskless(workerCtx) || isDiskAddingForMd) && !skipDisk)
{
// do not try to create meta data while the resource is diskless or skipDisk is enabled
for (DrbdVlmData<Resource> drbdVlmData : checkMetaData)
@@ -988,8 +995,10 @@ public class DrbdLayer implements DeviceLayer
{
List<DrbdVlmData<Resource>> checkMetaData = new ArrayList<>();
Resource rsc = drbdRscData.getAbsResource();
+ // Include DISK_ADD_REQUESTED/DISK_ADDING for toggle-disk scenario where we need to check/create metadata
if (!rsc.isDrbdDiskless(workerCtx) ||
- rsc.getStateFlags().isSet(workerCtx, Resource.Flags.DISK_REMOVING)
+ rsc.getStateFlags().isSet(workerCtx, Resource.Flags.DISK_REMOVING) ||
+ rsc.getStateFlags().isSomeSet(workerCtx, Resource.Flags.DISK_ADD_REQUESTED, Resource.Flags.DISK_ADDING)
)
{
// using a dedicated list to prevent concurrentModificationException
@@ -1177,9 +1186,16 @@ public class DrbdLayer implements DeviceLayer
boolean hasMetaData;
+ // Check if we need to verify/create metadata
+ // Force metadata check when:
+ // 1. checkMetaData is enabled
+ // 2. volume doesn't have disk yet (diskless -> diskful transition)
+ // 3. DISK_ADD_REQUESTED/DISK_ADDING flag is set (retry scenario where storage exists but no metadata)
+ boolean isDiskAddingState = drbdVlmData.getRscLayerObject().getAbsResource().getStateFlags()
+ .isSomeSet(workerCtx, Resource.Flags.DISK_ADD_REQUESTED, Resource.Flags.DISK_ADDING);
if (drbdVlmData.checkMetaData() ||
- // when adding a disk, DRBD believes that it is diskless but we still need to create metadata
- !drbdVlmData.hasDisk())
+ !drbdVlmData.hasDisk() ||
+ isDiskAddingState)
{
if (mdUtils.hasMetaData())
{

View File

@@ -1,93 +0,0 @@
diff --git a/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java b/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java
index 01967a3..871d830 100644
--- a/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java
+++ b/satellite/src/main/java/com/linbit/linstor/layer/drbd/DrbdLayer.java
@@ -592,7 +592,29 @@ public class DrbdLayer implements DeviceLayer
// The .res file might not have been generated in the prepare method since it was
// missing information from the child-layers. Now that we have processed them, we
// need to make sure the .res file exists in all circumstances.
- regenerateResFile(drbdRscData);
+ // However, if the underlying devices are not accessible (e.g., LUKS device is closed
+ // during resource deletion), we skip regenerating the res file to avoid errors
+ boolean canRegenerateResFile = true;
+ if (!skipDisk && !drbdRscData.getAbsResource().isDrbdDiskless(workerCtx))
+ {
+ AbsRscLayerObject<Resource> dataChild = drbdRscData.getChildBySuffix(RscLayerSuffixes.SUFFIX_DATA);
+ if (dataChild != null)
+ {
+ for (DrbdVlmData<Resource> drbdVlmData : drbdRscData.getVlmLayerObjects().values())
+ {
+ VlmProviderObject<Resource> childVlm = dataChild.getVlmProviderObject(drbdVlmData.getVlmNr());
+ if (childVlm == null || !childVlm.exists() || childVlm.getDevicePath() == null)
+ {
+ canRegenerateResFile = false;
+ break;
+ }
+ }
+ }
+ }
+ if (canRegenerateResFile)
+ {
+ regenerateResFile(drbdRscData);
+ }
// createMetaData needs rendered resFile
for (DrbdVlmData<Resource> drbdVlmData : createMetaData)
@@ -766,19 +788,47 @@ public class DrbdLayer implements DeviceLayer
if (drbdRscData.isAdjustRequired())
{
- try
+ // Check if underlying devices are accessible before adjusting
+ // This is important for encrypted resources (LUKS) where the device
+ // might be closed during deletion
+ boolean canAdjust = true;
+ if (!skipDisk && !drbdRscData.getAbsResource().isDrbdDiskless(workerCtx))
{
- drbdUtils.adjust(
- drbdRscData,
- false,
- skipDisk,
- false
- );
+ AbsRscLayerObject<Resource> dataChild = drbdRscData.getChildBySuffix(RscLayerSuffixes.SUFFIX_DATA);
+ if (dataChild != null)
+ {
+ for (DrbdVlmData<Resource> drbdVlmData : drbdRscData.getVlmLayerObjects().values())
+ {
+ VlmProviderObject<Resource> childVlm = dataChild.getVlmProviderObject(drbdVlmData.getVlmNr());
+ if (childVlm == null || !childVlm.exists() || childVlm.getDevicePath() == null)
+ {
+ canAdjust = false;
+ break;
+ }
+ }
+ }
}
- catch (ExtCmdFailedException extCmdExc)
+
+ if (canAdjust)
+ {
+ try
+ {
+ drbdUtils.adjust(
+ drbdRscData,
+ false,
+ skipDisk,
+ false
+ );
+ }
+ catch (ExtCmdFailedException extCmdExc)
+ {
+ restoreBackupResFile(drbdRscData);
+ throw extCmdExc;
+ }
+ }
+ else
{
- restoreBackupResFile(drbdRscData);
- throw extCmdExc;
+ drbdRscData.setAdjustRequired(false);
}
}

View File

@@ -0,0 +1,24 @@
{{/*
cozy.linstor.version renders the LINSTOR image coordinates as YAML
(base registry plus controller/satellite image and tag), extracted from
the Piraeus operator's image ConfigMap in the cozy-linstor namespace.
Rendering fails fast when the operator is not yet installed or when the
config is missing any expected field.
*/}}
{{- define "cozy.linstor.version" -}}
{{- $piraeusConfigMap := lookup "v1" "ConfigMap" "cozy-linstor" "piraeus-operator-image-config"}}
{{- if not $piraeusConfigMap }}
{{- fail "Piraeus controller is not yet installed, ConfigMap cozy-linstor/piraeus-operator-image-config is missing" }}
{{- end }}
{{- $piraeusImagesConfig := $piraeusConfigMap | dig "data" "0_piraeus_datastore_images.yaml" nil | required "No image config" | fromYaml }}
base: {{ $piraeusImagesConfig.base | required "No image base in piraeus config" }}
controller:
  image: {{ $piraeusImagesConfig | dig "components" "linstor-controller" "image" nil | required "No controller image" }}
  tag: {{ $piraeusImagesConfig | dig "components" "linstor-controller" "tag" nil | required "No controller tag" }}
satellite:
  image: {{ $piraeusImagesConfig | dig "components" "linstor-satellite" "image" nil | required "No satellite image" }}
  tag: {{ $piraeusImagesConfig | dig "components" "linstor-satellite" "tag" nil | required "No satellite tag" }}
{{- end -}}
{{/* cozy.linstor.version.controller renders "<base>/<image>:<tag>" for the LINSTOR controller. */}}
{{- define "cozy.linstor.version.controller" -}}
{{- $version := (include "cozy.linstor.version" .) | fromYaml }}
{{- printf "%s/%s:%s" $version.base $version.controller.image $version.controller.tag }}
{{- end -}}
{{/* cozy.linstor.version.satellite renders "<base>/<image>:<tag>" for the LINSTOR satellite. */}}
{{- define "cozy.linstor.version.satellite" -}}
{{- $version := (include "cozy.linstor.version" .) | fromYaml }}
{{- printf "%s/%s:%s" $version.base $version.satellite.image $version.satellite.tag }}
{{- end -}}

View File

@@ -27,10 +27,8 @@ spec:
podTemplate:
spec:
containers:
- name: linstor-controller
image: {{ .Values.piraeusServer.image.repository }}:{{ .Values.piraeusServer.image.tag }}
- name: plunger
image: {{ .Values.piraeusServer.image.repository }}:{{ .Values.piraeusServer.image.tag }}
image: {{ include "cozy.linstor.version.controller" . }}
command:
- "/scripts/plunger-controller.sh"
securityContext:

View File

@@ -13,7 +13,6 @@ spec:
hostNetwork: true
containers:
- name: linstor-satellite
image: {{ .Values.piraeusServer.image.repository }}:{{ .Values.piraeusServer.image.tag }}
securityContext:
# real-world installations need some debugging from time to time
readOnlyRootFilesystem: false

View File

@@ -11,7 +11,7 @@ spec:
spec:
containers:
- name: plunger
image: {{ .Values.piraeusServer.image.repository }}:{{ .Values.piraeusServer.image.tag }}
image: {{ include "cozy.linstor.version.satellite" . }}
command:
- "/scripts/plunger-satellite.sh"
securityContext:
@@ -48,7 +48,7 @@ spec:
name: script-volume
readOnly: true
- name: drbd-logger
image: {{ .Values.piraeusServer.image.repository }}:{{ .Values.piraeusServer.image.tag }}
image: {{ include "cozy.linstor.version.satellite" . }}
command:
- "/scripts/plunger-drbd-logger.sh"
securityContext:

View File

@@ -1,4 +1 @@
piraeusServer:
image:
repository: ghcr.io/cozystack/cozystack/piraeus-server
tag: latest@sha256:417532baa2801288147cd9ac9ae260751c1a7754f0b829725d09b72a770c111a

View File

@@ -14,10 +14,12 @@ spec:
labels:
cozystack.io/ui: "true"
internal.cozystack.io/tenantmodule: "true"
chartRef:
kind: OCIRepository
name: monitoring-rd
namespace: cozy-system
chart:
name: monitoring
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
dashboard:
category: Administration
singular: Monitoring

View File

@@ -13,10 +13,12 @@ spec:
prefix: mysql-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: mysql-rd
namespace: cozy-system
chart:
name: mysql
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: MySQL

View File

@@ -13,10 +13,12 @@ spec:
prefix: nats-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: nats-rd
namespace: cozy-system
chart:
name: nats
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: NATS

View File

@@ -13,10 +13,12 @@ spec:
prefix: postgres-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: postgres-rd
namespace: cozy-system
chart:
name: postgres
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: PostgreSQL

View File

@@ -13,10 +13,12 @@ spec:
prefix: rabbitmq-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: rabbitmq-rd
namespace: cozy-system
chart:
name: rabbitmq
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: RabbitMQ

View File

@@ -13,10 +13,12 @@ spec:
prefix: redis-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: redis-rd
namespace: cozy-system
chart:
name: redis
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: PaaS
singular: Redis

View File

@@ -14,10 +14,12 @@ spec:
labels:
cozystack.io/ui: "true"
internal.cozystack.io/tenantmodule: "true"
chartRef:
kind: OCIRepository
name: seaweedfs-rd
namespace: cozy-system
chart:
name: seaweedfs
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
dashboard:
category: Administration
singular: SeaweedFS

View File

@@ -13,10 +13,12 @@ spec:
prefix: tcp-balancer-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: tcp-balancer-rd
namespace: cozy-system
chart:
name: tcp-balancer
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: NaaS
singular: TCP Balancer

View File

@@ -13,10 +13,12 @@ spec:
prefix: tenant-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: tenant-rd
namespace: cozy-system
chart:
name: tenant
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: Administration
singular: Tenant

View File

@@ -13,10 +13,12 @@ spec:
prefix: virtual-machine-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: virtual-machine-rd
namespace: cozy-system
chart:
name: virtual-machine
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: IaaS
singular: Virtual Machine

View File

@@ -13,10 +13,12 @@ spec:
prefix: "virtualprivatecloud-"
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: virtualprivatecloud-rd
namespace: cozy-system
chart:
name: virtualprivatecloud
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: IaaS
singular: VPC

View File

@@ -13,10 +13,12 @@ spec:
prefix: vm-disk-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: vm-disk-rd
namespace: cozy-system
chart:
name: vm-disk
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: IaaS
singular: VM Disk

View File

@@ -13,10 +13,12 @@ spec:
prefix: vm-instance-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: vm-instance-rd
namespace: cozy-system
chart:
name: vm-instance
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: IaaS
singular: VM Instance

View File

@@ -13,10 +13,12 @@ spec:
prefix: vpn-
labels:
cozystack.io/ui: "true"
chartRef:
kind: OCIRepository
name: vpn-rd
namespace: cozy-system
chart:
name: vpn
sourceRef:
kind: HelmRepository
name: cozystack-apps
namespace: cozy-public
dashboard:
category: NaaS
singular: VPN

View File

@@ -169,10 +169,13 @@ func (o *CozyServerOptions) Complete() error {
Release: config.ReleaseConfig{
Prefix: crd.Spec.Release.Prefix,
Labels: crd.Spec.Release.Labels,
ChartRef: config.ChartRefConfig{
Kind: crd.Spec.Release.ChartRef.Kind,
Name: crd.Spec.Release.ChartRef.Name,
Namespace: crd.Spec.Release.ChartRef.Namespace,
Chart: config.ChartConfig{
Name: crd.Spec.Release.Chart.Name,
SourceRef: config.SourceRefConfig{
Kind: crd.Spec.Release.Chart.SourceRef.Kind,
Name: crd.Spec.Release.Chart.SourceRef.Name,
Namespace: crd.Spec.Release.Chart.SourceRef.Namespace,
},
},
},
}

View File

@@ -38,13 +38,19 @@ type ApplicationConfig struct {
// ReleaseConfig contains the release settings.
type ReleaseConfig struct {
Prefix string `yaml:"prefix"`
Labels map[string]string `yaml:"labels"`
ChartRef ChartRefConfig `yaml:"chartRef"`
Prefix string `yaml:"prefix"`
Labels map[string]string `yaml:"labels"`
Chart ChartConfig `yaml:"chart"`
}
// ChartRefConfig references a Flux source artifact for the Helm chart.
type ChartRefConfig struct {
// ChartConfig contains the chart settings.
type ChartConfig struct {
Name string `yaml:"name"`
SourceRef SourceRefConfig `yaml:"sourceRef"`
}
// SourceRefConfig contains the reference to the chart source.
type SourceRefConfig struct {
Kind string `yaml:"kind"`
Name string `yaml:"name"`
Namespace string `yaml:"namespace"`

View File

@@ -2714,13 +2714,6 @@ func schema_pkg_apis_meta_v1_DeleteOptions(ref common.ReferenceCallback) common.
},
},
},
"ignoreStoreReadErrorWithClusterBreakingPotential": {
SchemaProps: spec.SchemaProps{
Description: "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it",
Type: []string{"boolean"},
Format: "",
},
},
},
},
},
@@ -4608,46 +4601,16 @@ func schema_k8sio_apimachinery_pkg_version_Info(ref common.ReferenceCallback) co
Properties: map[string]spec.Schema{
"major": {
SchemaProps: spec.SchemaProps{
Description: "Major is the major version of the binary version",
Default: "",
Type: []string{"string"},
Format: "",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"minor": {
SchemaProps: spec.SchemaProps{
Description: "Minor is the minor version of the binary version",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"emulationMajor": {
SchemaProps: spec.SchemaProps{
Description: "EmulationMajor is the major version of the emulation version",
Type: []string{"string"},
Format: "",
},
},
"emulationMinor": {
SchemaProps: spec.SchemaProps{
Description: "EmulationMinor is the minor version of the emulation version",
Type: []string{"string"},
Format: "",
},
},
"minCompatibilityMajor": {
SchemaProps: spec.SchemaProps{
Description: "MinCompatibilityMajor is the major version of the minimum compatibility version",
Type: []string{"string"},
Format: "",
},
},
"minCompatibilityMinor": {
SchemaProps: spec.SchemaProps{
Description: "MinCompatibilityMinor is the minor version of the minimum compatibility version",
Type: []string{"string"},
Format: "",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"gitVersion": {

View File

@@ -22,11 +22,6 @@ const (
HRLabel = "helm.toolkit.fluxcd.io/name"
)
// AppMapper maps HelmRelease to application metadata.
type AppMapper interface {
Map(*helmv2.HelmRelease) (apiVersion, kind, prefix string, err error)
}
type ObjectID struct {
APIVersion string
Kind string

View File

@@ -4,10 +4,8 @@ import (
"context"
"fmt"
"os"
"strings"
"testing"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"go.uber.org/zap"
@@ -43,41 +41,12 @@ func init() {
ctx = logr.NewContext(context.Background(), l)
}
// labelsMapper implements AppMapper using HelmRelease labels.
type labelsMapper struct{}
func (m *labelsMapper) Map(hr *helmv2.HelmRelease) (string, string, string, error) {
if hr.Labels == nil {
return "", "", "", fmt.Errorf("cannot map helm release %s/%s: labels are nil", hr.Namespace, hr.Name)
}
appKind, ok := hr.Labels["apps.cozystack.io/application.kind"]
if !ok {
return "", "", "", fmt.Errorf("cannot map helm release %s/%s: missing application.kind label", hr.Namespace, hr.Name)
}
appGroup, ok := hr.Labels["apps.cozystack.io/application.group"]
if !ok {
return "", "", "", fmt.Errorf("cannot map helm release %s/%s: missing application.group label", hr.Namespace, hr.Name)
}
appName, ok := hr.Labels["apps.cozystack.io/application.name"]
if !ok {
return "", "", "", fmt.Errorf("cannot map helm release %s/%s: missing application.name label", hr.Namespace, hr.Name)
}
apiVersion := fmt.Sprintf("%s/v1alpha1", appGroup)
prefix := strings.TrimSuffix(hr.Name, appName)
return apiVersion, appKind, prefix, nil
}
func TestWalkingOwnershipGraph(t *testing.T) {
obj, err := dynClient.Resource(schema.GroupVersionResource{"", "v1", "pods"}).Namespace(os.Args[1]).Get(ctx, os.Args[2], metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
nodes := WalkOwnershipGraph(ctx, dynClient, mapper, &labelsMapper{}, obj)
nodes := WalkOwnershipGraph(ctx, dynClient, mapper, &stubMapper{}, obj)
for _, node := range nodes {
fmt.Printf("%#v\n", node)
}

49
pkg/lineage/mapper.go Normal file
View File

@@ -0,0 +1,49 @@
package lineage
import (
"fmt"
"strings"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
)
// AppMapper resolves a Flux HelmRelease to the identity of the dynamic
// application it was created for: the application's apiVersion, its kind,
// and the prefix prepended to the application name to form the release
// name (the prefix may be empty).
type AppMapper interface {
	Map(*helmv2.HelmRelease) (apiVersion, kind, prefix string, err error)
}
// stubMapper is an AppMapper backed by a static lookup table of
// well-known Cozystack charts, keyed by the release's chart source and
// chart name.
type stubMapper struct{}

// stubMapperMap maps "<helm-repository-name>/<chart-name>" to
// "<apiVersion>/<Kind>/<release-name-prefix>"; the trailing prefix
// segment may be empty (entries ending in "/").
var stubMapperMap = map[string]string{
	"cozystack-extra/bootbox":        "apps.cozystack.io/v1alpha1/BootBox/",
	"cozystack-apps/bucket":          "apps.cozystack.io/v1alpha1/Bucket/bucket-",
	"cozystack-apps/clickhouse":      "apps.cozystack.io/v1alpha1/ClickHouse/clickhouse-",
	"cozystack-extra/etcd":           "apps.cozystack.io/v1alpha1/Etcd/",
	"cozystack-apps/ferretdb":        "apps.cozystack.io/v1alpha1/FerretDB/ferretdb-",
	"cozystack-apps/http-cache":      "apps.cozystack.io/v1alpha1/HTTPCache/http-cache-",
	"cozystack-extra/info":           "apps.cozystack.io/v1alpha1/Info/",
	"cozystack-extra/ingress":        "apps.cozystack.io/v1alpha1/Ingress/",
	"cozystack-apps/kafka":           "apps.cozystack.io/v1alpha1/Kafka/kafka-",
	"cozystack-apps/kubernetes":      "apps.cozystack.io/v1alpha1/Kubernetes/kubernetes-",
	"cozystack-extra/monitoring":     "apps.cozystack.io/v1alpha1/Monitoring/",
	"cozystack-apps/mysql":           "apps.cozystack.io/v1alpha1/MySQL/mysql-",
	"cozystack-apps/nats":            "apps.cozystack.io/v1alpha1/NATS/nats-",
	"cozystack-apps/postgres":        "apps.cozystack.io/v1alpha1/Postgres/postgres-",
	"cozystack-apps/rabbitmq":        "apps.cozystack.io/v1alpha1/RabbitMQ/rabbitmq-",
	"cozystack-apps/redis":           "apps.cozystack.io/v1alpha1/Redis/redis-",
	"cozystack-extra/seaweedfs":      "apps.cozystack.io/v1alpha1/SeaweedFS/",
	"cozystack-apps/tcp-balancer":    "apps.cozystack.io/v1alpha1/TCPBalancer/tcp-balancer-",
	"cozystack-apps/tenant":          "apps.cozystack.io/v1alpha1/Tenant/tenant-",
	"cozystack-apps/virtual-machine": "apps.cozystack.io/v1alpha1/VirtualMachine/virtual-machine-",
	"cozystack-apps/vm-disk":         "apps.cozystack.io/v1alpha1/VMDisk/vm-disk-",
	"cozystack-apps/vm-instance":     "apps.cozystack.io/v1alpha1/VMInstance/vm-instance-",
	"cozystack-apps/vpn":             "apps.cozystack.io/v1alpha1/VPN/vpn-",
}
// Map resolves hr to its dynamic application identity by looking up the
// release's chart source name and chart name in stubMapperMap. It returns
// the application apiVersion, kind, and release-name prefix, or an error
// when the chart is not a known dynamic application.
func (s *stubMapper) Map(hr *helmv2.HelmRelease) (string, string, string, error) {
	key := hr.Spec.Chart.Spec.SourceRef.Name + "/" + hr.Spec.Chart.Spec.Chart
	entry, ok := stubMapperMap[key]
	if !ok {
		return "", "", "", fmt.Errorf("cannot map helm release %s/%s to dynamic app", hr.Namespace, hr.Name)
	}
	// Entry layout: "<group>/<version>/<Kind>/<prefix>"; the prefix may be empty.
	parts := strings.SplitN(entry, "/", 4)
	apiVersion := parts[0] + "/" + parts[1]
	return apiVersion, parts[2], parts[3], nil
}

View File

@@ -963,10 +963,17 @@ func (r *REST) convertApplicationToHelmRelease(app *appsv1alpha1.Application) (*
UID: app.UID,
},
Spec: helmv2.HelmReleaseSpec{
ChartRef: &helmv2.CrossNamespaceSourceReference{
Kind: r.releaseConfig.ChartRef.Kind,
Name: r.releaseConfig.ChartRef.Name,
Namespace: r.releaseConfig.ChartRef.Namespace,
Chart: &helmv2.HelmChartTemplate{
Spec: helmv2.HelmChartTemplateSpec{
Chart: r.releaseConfig.Chart.Name,
Version: ">= 0.0.0-0",
ReconcileStrategy: "Revision",
SourceRef: helmv2.CrossNamespaceObjectReference{
Kind: r.releaseConfig.Chart.SourceRef.Kind,
Name: r.releaseConfig.Chart.SourceRef.Name,
Namespace: r.releaseConfig.Chart.SourceRef.Namespace,
},
},
},
Interval: metav1.Duration{Duration: 5 * time.Minute},
Install: &helmv2.Install{
@@ -1078,19 +1085,11 @@ func (r *REST) buildTableFromApplication(app appsv1alpha1.Application) metav1.Ta
return table
}
// getVersion extracts the revision component of a version string for display.
// For "semver+revision" values such as "0.1.4+abcdef" it returns only the
// part after the last "+" ("abcdef"); an empty string yields the "<unknown>"
// placeholder; any other value (including one ending in a bare "+") is
// returned unchanged.
func getVersion(version string) string {
	if version == "" {
		return "<unknown>"
	}
	// Take everything after the last "+", but only when something follows it.
	sep := strings.LastIndexByte(version, '+')
	if sep < 0 || sep == len(version)-1 {
		return version
	}
	return version[sep+1:]
}

View File

@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"net/http"
"strings"
"sync"
"time"
@@ -667,19 +666,11 @@ func (r *REST) buildTableFromTenantModule(module corev1alpha1.TenantModule) meta
return table
}
// getVersion normalizes a module version string for table output.
// An empty string renders as "<unknown>"; for "semver+revision" values
// (e.g. "0.1.4+abcdef") only the revision after the last "+" is returned;
// anything else — including a version ending in a bare "+" — passes
// through unchanged.
func getVersion(version string) string {
	if version == "" {
		return "<unknown>"
	}
	if idx := strings.LastIndex(version, "+"); idx >= 0 && idx < len(version)-1 {
		// Keep only the revision that follows the final "+".
		return version[idx+1:]
	}
	return version
}

71
scripts/installer.sh Executable file
View File

@@ -0,0 +1,71 @@
#!/bin/sh
# Cozystack in-cluster installer: runs version migrations, installs the
# platform components, and then keeps the platform chart reconciled forever.
# NOTE(review): `set -o pipefail` is not POSIX sh; a strict /bin/sh (dash)
# rejects it — confirm the runtime shell (busybox/bash) supports pipefail.
set -o pipefail
set -e
# Read the configured bundle name (the `set -x` subshell echoes the command
# for the installer log). NOTE(review): BUNDLE is never referenced later in
# this script — the lookup mainly fails fast (via set -e) when the
# cozy-system/cozystack configmap is missing.
BUNDLE=$(set -x; kubectl get configmap -n cozy-system cozystack -o 'go-template={{index .data "bundle-name"}}')
# Target schema version: highest-numbered migration script filename plus one.
VERSION=$(find scripts/migrations -mindepth 1 -maxdepth 1 -type f | sort -V | awk -F/ 'END {print $NF+1}')
# run_migrations brings the cluster's recorded version (configmap
# cozy-system/cozystack-version) up to the target VERSION by executing each
# migration script in sequence. On a fresh install (no configmap yet) it only
# records the current version and skips migrations entirely.
# NOTE(review): each migration script is expected to bump the configmap's
# version itself, otherwise this loop never terminates — confirm.
run_migrations() {
	if ! kubectl get configmap -n cozy-system cozystack-version; then
		# Fresh install: record the version; nothing to migrate.
		kubectl create configmap -n cozy-system cozystack-version --from-literal=version="$VERSION" --dry-run=client -o yaml | kubectl create -f-
		return
	fi
	current_version=$(kubectl get configmap -n cozy-system cozystack-version -o jsonpath='{.data.version}') || true
	until [ "$current_version" = "$VERSION" ]; do
		echo "run migration: $current_version --> $VERSION"
		# Quote expansions to avoid word splitting/globbing (shellcheck SC2086).
		chmod +x "scripts/migrations/$current_version"
		"scripts/migrations/$current_version"
		current_version=$(kubectl get configmap -n cozy-system cozystack-version -o jsonpath='{.data.version}')
	done
}
# install_flux deploys the bundled Flux AIO chart and blocks until the
# HelmRelease/HelmRepository CRDs are registered. A no-op unless the
# INSTALL_FLUX environment variable is exactly "true".
install_flux() {
[ "$INSTALL_FLUX" = "true" ] || return 0
make -C packages/core/flux-aio apply
wait_for_crds helmreleases.helm.toolkit.fluxcd.io helmrepositories.source.toolkit.fluxcd.io
}
# wait_for_crds polls `kubectl get crd <names...>` once per second until the
# command succeeds, giving up after 60 seconds (timeout's non-zero exit then
# aborts the script via set -e).
# NOTE(review): "$*" is expanded by the outer shell into the double-quoted
# inner script, so CRD names are re-split by the inner sh — fine for plain
# CRD names, which contain no whitespace or glob characters.
wait_for_crds() {
timeout 60 sh -c "until kubectl get crd $*; do sleep 1; done"
}
# Work from the repository root regardless of where the script was invoked.
cd "$(dirname "$0")/.."
# Run migrations
run_migrations
# Install namespaces
make -C packages/core/platform namespaces-apply
# Install fluxcd
install_flux
# Install fluxcd certificates
./scripts/issue-flux-certificates.sh
# Install platform chart
make -C packages/core/platform reconcile
# Reconcile Helm repositories: stamp an annotation Flux watches to force a
# fresh fetch of every Cozystack-labelled HelmRepository.
kubectl annotate helmrepositories.source.toolkit.fluxcd.io -A -l cozystack.io/repository reconcile.fluxcd.io/requestedAt=$(date +"%Y-%m-%dT%H:%M:%SZ") --overwrite
# Unsuspend all Cozystack managed charts. The go-template emits one
# "sourceNamespace/sourceName namespace name" line per suspended HelmRelease;
# only releases sourced from the Cozystack repositories are patched.
kubectl get hr -A -o go-template='{{ range .items }}{{ if .spec.suspend }}{{ .spec.chart.spec.sourceRef.namespace }}/{{ .spec.chart.spec.sourceRef.name }} {{ .metadata.namespace }} {{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' | while read repo namespace name; do
case "$repo" in
cozy-system/cozystack-system|cozy-public/cozystack-extra|cozy-public/cozystack-apps)
# Merge-patch suspend to null using Flux's field manager so the change
# is not fought over by Flux's own server-side apply.
kubectl patch hr -n "$namespace" "$name" -p '{"spec": {"suspend": null}}' --type=merge --field-manager=flux-client-side-apply
;;
esac
done
# Update all Cozystack managed charts to latest version: generate one
# `kubectl patch` command per UI-visible HelmRelease and pipe them to sh.
kubectl get hr -A -l cozystack.io/ui=true --no-headers | awk '{print "kubectl patch helmrelease -n " $1 " " $2 " --type=merge -p '\''{\"spec\":{\"chart\":{\"spec\":{\"version\":\">= 0.0.0-0\"}}}}'\'' "}' | sh -x
# Reconcile platform chart in an endless loop; exit cleanly on INT/TERM.
trap 'exit' INT TERM
while true; do
# Sleep in the background and `wait` on it so a trapped signal interrupts
# the delay immediately instead of waiting out the full 60 seconds.
sleep 60 & wait
make -C packages/core/platform reconcile
done

Some files were not shown because too many files have changed in this diff Show More