mirror of
https://github.com/cozystack/cozystack.git
synced 2026-03-03 21:48:57 +00:00
Compare commits
6 Commits
fix/migrat
...
feat/cozyc
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
488b1bf27b | ||
|
|
f4e9660b43 | ||
|
|
d9cfd5ac9e | ||
|
|
5762ac4139 | ||
|
|
38f446c0d3 | ||
|
|
4de8e91864 |
2
.github/CODEOWNERS
vendored
2
.github/CODEOWNERS
vendored
@@ -1 +1 @@
|
||||
* @kvaps @lllamnyp @lexfrei @androndo @IvanHunters @sircthulhu
|
||||
* @kvaps @lllamnyp @lexfrei @androndo @IvanHunters
|
||||
|
||||
15
Makefile
15
Makefile
@@ -58,7 +58,10 @@ manifests:
|
||||
cozypkg:
|
||||
go build -ldflags "-X github.com/cozystack/cozystack/cmd/cozypkg/cmd.Version=v$(COZYSTACK_VERSION)" -o _out/bin/cozypkg ./cmd/cozypkg
|
||||
|
||||
assets: assets-talos assets-cozypkg
|
||||
cozyctl:
|
||||
go build -ldflags "-X github.com/cozystack/cozystack/cmd/cozyctl/cmd.Version=v$(COZYSTACK_VERSION)" -o _out/bin/cozyctl ./cmd/cozyctl
|
||||
|
||||
assets: assets-talos assets-cozypkg assets-cozyctl
|
||||
|
||||
assets-talos:
|
||||
make -C packages/core/talos assets
|
||||
@@ -73,6 +76,16 @@ assets-cozypkg-%:
|
||||
cp LICENSE _out/bin/cozypkg-$*/LICENSE
|
||||
tar -C _out/bin/cozypkg-$* -czf _out/assets/cozypkg-$*.tar.gz LICENSE cozypkg$(EXT)
|
||||
|
||||
assets-cozyctl: assets-cozyctl-linux-amd64 assets-cozyctl-linux-arm64 assets-cozyctl-darwin-amd64 assets-cozyctl-darwin-arm64 assets-cozyctl-windows-amd64 assets-cozyctl-windows-arm64
|
||||
(cd _out/assets/ && sha256sum cozyctl-*.tar.gz) > _out/assets/cozyctl-checksums.txt
|
||||
|
||||
assets-cozyctl-%:
|
||||
$(eval EXT := $(if $(filter windows,$(firstword $(subst -, ,$*))),.exe,))
|
||||
mkdir -p _out/assets
|
||||
GOOS=$(firstword $(subst -, ,$*)) GOARCH=$(lastword $(subst -, ,$*)) go build -ldflags "-X github.com/cozystack/cozystack/cmd/cozyctl/cmd.Version=v$(COZYSTACK_VERSION)" -o _out/bin/cozyctl-$*/cozyctl$(EXT) ./cmd/cozyctl
|
||||
cp LICENSE _out/bin/cozyctl-$*/LICENSE
|
||||
tar -C _out/bin/cozyctl-$* -czf _out/assets/cozyctl-$*.tar.gz LICENSE cozyctl$(EXT)
|
||||
|
||||
test:
|
||||
make -C packages/core/testing apply
|
||||
make -C packages/core/testing test
|
||||
|
||||
114
cmd/cozyctl/cmd/client.go
Normal file
114
cmd/cozyctl/cmd/client.go
Normal file
@@ -0,0 +1,114 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/dynamic"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func buildRestConfig() (*rest.Config, error) {
|
||||
rules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
if globalFlags.kubeconfig != "" {
|
||||
rules.ExplicitPath = globalFlags.kubeconfig
|
||||
}
|
||||
overrides := &clientcmd.ConfigOverrides{}
|
||||
if globalFlags.context != "" {
|
||||
overrides.CurrentContext = globalFlags.context
|
||||
}
|
||||
config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load kubeconfig: %w", err)
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func newScheme() *runtime.Scheme {
|
||||
scheme := runtime.NewScheme()
|
||||
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
|
||||
utilruntime.Must(cozyv1alpha1.AddToScheme(scheme))
|
||||
return scheme
|
||||
}
|
||||
|
||||
func newClients() (client.Client, dynamic.Interface, error) {
|
||||
config, err := buildRestConfig()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
scheme := newScheme()
|
||||
|
||||
typedClient, err := client.New(config, client.Options{Scheme: scheme})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create k8s client: %w", err)
|
||||
}
|
||||
|
||||
dynClient, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create dynamic client: %w", err)
|
||||
}
|
||||
|
||||
return typedClient, dynClient, nil
|
||||
}
|
||||
|
||||
func getNamespace() (string, error) {
|
||||
if globalFlags.namespace != "" {
|
||||
return globalFlags.namespace, nil
|
||||
}
|
||||
|
||||
rules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
if globalFlags.kubeconfig != "" {
|
||||
rules.ExplicitPath = globalFlags.kubeconfig
|
||||
}
|
||||
overrides := &clientcmd.ConfigOverrides{}
|
||||
if globalFlags.context != "" {
|
||||
overrides.CurrentContext = globalFlags.context
|
||||
}
|
||||
|
||||
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
|
||||
ns, _, err := clientConfig.Namespace()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to determine namespace: %w", err)
|
||||
}
|
||||
if ns == "" {
|
||||
ns = "default"
|
||||
}
|
||||
return ns, nil
|
||||
}
|
||||
|
||||
// getRestConfig is a convenience function when only the rest.Config is needed
|
||||
// (used by buildRestConfig but also available for other callers).
|
||||
func getRestConfig() (*rest.Config, error) {
|
||||
if globalFlags.kubeconfig != "" || globalFlags.context != "" {
|
||||
return buildRestConfig()
|
||||
}
|
||||
config, err := ctrl.GetConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get kubeconfig: %w", err)
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
43
cmd/cozyctl/cmd/console.go
Normal file
43
cmd/cozyctl/cmd/console.go
Normal file
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var consoleCmd = &cobra.Command{
|
||||
Use: "console <type> <name>",
|
||||
Short: "Open a serial console to a VirtualMachine",
|
||||
Long: `Open a serial console to a VirtualMachine using virtctl. Only valid for VirtualMachine or VMInstance kinds.`,
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: runConsole,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(consoleCmd)
|
||||
}
|
||||
|
||||
func runConsole(cmd *cobra.Command, args []string) error {
|
||||
vmName, ns, err := resolveVMArgs(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
virtctlArgs := []string{"virtctl", "console", vmName, "-n", ns}
|
||||
return execVirtctl(virtctlArgs)
|
||||
}
|
||||
112
cmd/cozyctl/cmd/discovery.go
Normal file
112
cmd/cozyctl/cmd/discovery.go
Normal file
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// AppDefInfo holds resolved information about an ApplicationDefinition.
|
||||
type AppDefInfo struct {
|
||||
Name string // e.g. "postgres"
|
||||
Kind string // e.g. "Postgres"
|
||||
Plural string // e.g. "postgreses"
|
||||
Singular string // e.g. "postgres"
|
||||
Prefix string // e.g. "postgres-"
|
||||
IsModule bool
|
||||
}
|
||||
|
||||
// AppDefRegistry provides fast lookup of ApplicationDefinitions by plural, singular, or kind.
|
||||
type AppDefRegistry struct {
|
||||
byPlural map[string]*AppDefInfo
|
||||
bySingular map[string]*AppDefInfo
|
||||
byKind map[string]*AppDefInfo
|
||||
all []*AppDefInfo
|
||||
}
|
||||
|
||||
// discoverAppDefs lists all ApplicationDefinitions from the cluster and builds a registry.
|
||||
func discoverAppDefs(ctx context.Context, typedClient client.Client) (*AppDefRegistry, error) {
|
||||
var list cozyv1alpha1.ApplicationDefinitionList
|
||||
if err := typedClient.List(ctx, &list); err != nil {
|
||||
return nil, fmt.Errorf("failed to list ApplicationDefinitions: %w", err)
|
||||
}
|
||||
|
||||
reg := &AppDefRegistry{
|
||||
byPlural: make(map[string]*AppDefInfo),
|
||||
bySingular: make(map[string]*AppDefInfo),
|
||||
byKind: make(map[string]*AppDefInfo),
|
||||
}
|
||||
|
||||
for i := range list.Items {
|
||||
ad := &list.Items[i]
|
||||
info := &AppDefInfo{
|
||||
Name: ad.Name,
|
||||
Kind: ad.Spec.Application.Kind,
|
||||
Plural: ad.Spec.Application.Plural,
|
||||
Singular: ad.Spec.Application.Singular,
|
||||
Prefix: ad.Spec.Release.Prefix,
|
||||
IsModule: ad.Spec.Dashboard != nil && ad.Spec.Dashboard.Module,
|
||||
}
|
||||
reg.all = append(reg.all, info)
|
||||
reg.byPlural[strings.ToLower(info.Plural)] = info
|
||||
reg.bySingular[strings.ToLower(info.Singular)] = info
|
||||
reg.byKind[strings.ToLower(info.Kind)] = info
|
||||
}
|
||||
|
||||
return reg, nil
|
||||
}
|
||||
|
||||
// Resolve looks up an AppDefInfo by name (case-insensitive), checking plural, singular, then kind.
|
||||
func (r *AppDefRegistry) Resolve(name string) *AppDefInfo {
|
||||
lower := strings.ToLower(name)
|
||||
if info, ok := r.byPlural[lower]; ok {
|
||||
return info
|
||||
}
|
||||
if info, ok := r.bySingular[lower]; ok {
|
||||
return info
|
||||
}
|
||||
if info, ok := r.byKind[lower]; ok {
|
||||
return info
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResolveModule looks up an AppDefInfo among modules only.
|
||||
func (r *AppDefRegistry) ResolveModule(name string) *AppDefInfo {
|
||||
lower := strings.ToLower(name)
|
||||
for _, info := range r.all {
|
||||
if !info.IsModule {
|
||||
continue
|
||||
}
|
||||
if strings.ToLower(info.Plural) == lower ||
|
||||
strings.ToLower(info.Singular) == lower ||
|
||||
strings.ToLower(info.Kind) == lower {
|
||||
return info
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// All returns all discovered AppDefInfo entries.
|
||||
func (r *AppDefRegistry) All() []*AppDefInfo {
|
||||
return r.all
|
||||
}
|
||||
361
cmd/cozyctl/cmd/get.go
Normal file
361
cmd/cozyctl/cmd/get.go
Normal file
@@ -0,0 +1,361 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
appsv1alpha1 "github.com/cozystack/cozystack/pkg/apis/apps/v1alpha1"
|
||||
corev1alpha1 "github.com/cozystack/cozystack/pkg/apis/core/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
var getCmdFlags struct {
|
||||
target string
|
||||
}
|
||||
|
||||
var getCmd = &cobra.Command{
|
||||
Use: "get <type> [name]",
|
||||
Short: "Display one or many resources",
|
||||
Long: `Display one or many resources.
|
||||
|
||||
Built-in types:
|
||||
ns, namespaces Tenant namespaces (cluster-scoped)
|
||||
modules Tenant modules
|
||||
pvc, pvcs PersistentVolumeClaims
|
||||
|
||||
Sub-resource types (use -t to filter by parent application):
|
||||
secrets Secrets
|
||||
services, svc Services
|
||||
ingresses, ing Ingresses
|
||||
workloads WorkloadMonitors
|
||||
|
||||
Application types are discovered dynamically from ApplicationDefinitions.
|
||||
Use -t type/name to filter sub-resources by a specific application.`,
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: runGet,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(getCmd)
|
||||
getCmd.Flags().StringVarP(&getCmdFlags.target, "target", "t", "", "Filter sub-resources by application type/name")
|
||||
}
|
||||
|
||||
func runGet(cmd *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
resourceType := args[0]
|
||||
var resourceName string
|
||||
if len(args) > 1 {
|
||||
resourceName = args[1]
|
||||
}
|
||||
|
||||
switch strings.ToLower(resourceType) {
|
||||
case "ns", "namespace", "namespaces":
|
||||
return getNamespaces(ctx, resourceName)
|
||||
case "module", "modules":
|
||||
return getModules(ctx, resourceName)
|
||||
case "pvc", "pvcs", "persistentvolumeclaim", "persistentvolumeclaims":
|
||||
return getPVCs(ctx, resourceName)
|
||||
case "secret", "secrets":
|
||||
return getSubResources(ctx, "secrets", resourceName)
|
||||
case "service", "services", "svc":
|
||||
return getSubResources(ctx, "services", resourceName)
|
||||
case "ingress", "ingresses", "ing":
|
||||
return getSubResources(ctx, "ingresses", resourceName)
|
||||
case "workload", "workloads":
|
||||
return getSubResources(ctx, "workloads", resourceName)
|
||||
default:
|
||||
return getApplications(ctx, resourceType, resourceName)
|
||||
}
|
||||
}
|
||||
|
||||
func getNamespaces(ctx context.Context, name string) error {
|
||||
_, dynClient, err := newClients()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gvr := schema.GroupVersionResource{Group: "core.cozystack.io", Version: "v1alpha1", Resource: "tenantnamespaces"}
|
||||
|
||||
if name != "" {
|
||||
item, err := dynClient.Resource(gvr).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get namespace %q: %w", name, err)
|
||||
}
|
||||
printNamespaces([]unstructured.Unstructured{*item})
|
||||
return nil
|
||||
}
|
||||
|
||||
list, err := dynClient.Resource(gvr).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list namespaces: %w", err)
|
||||
}
|
||||
if len(list.Items) == 0 {
|
||||
printNoResources(os.Stderr, "namespaces")
|
||||
return nil
|
||||
}
|
||||
printNamespaces(list.Items)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getModules(ctx context.Context, name string) error {
|
||||
_, dynClient, err := newClients()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns, err := getNamespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gvr := schema.GroupVersionResource{Group: "core.cozystack.io", Version: "v1alpha1", Resource: "tenantmodules"}
|
||||
|
||||
if name != "" {
|
||||
item, err := dynClient.Resource(gvr).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get module %q: %w", name, err)
|
||||
}
|
||||
printModules([]unstructured.Unstructured{*item})
|
||||
return nil
|
||||
}
|
||||
|
||||
list, err := dynClient.Resource(gvr).Namespace(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list modules: %w", err)
|
||||
}
|
||||
if len(list.Items) == 0 {
|
||||
printNoResources(os.Stderr, "modules")
|
||||
return nil
|
||||
}
|
||||
printModules(list.Items)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPVCs(ctx context.Context, name string) error {
|
||||
_, dynClient, err := newClients()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns, err := getNamespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
|
||||
|
||||
if name != "" {
|
||||
item, err := dynClient.Resource(gvr).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get PVC %q: %w", name, err)
|
||||
}
|
||||
printPVCs([]unstructured.Unstructured{*item})
|
||||
return nil
|
||||
}
|
||||
|
||||
list, err := dynClient.Resource(gvr).Namespace(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list PVCs: %w", err)
|
||||
}
|
||||
if len(list.Items) == 0 {
|
||||
printNoResources(os.Stderr, "PVCs")
|
||||
return nil
|
||||
}
|
||||
printPVCs(list.Items)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getSubResources(ctx context.Context, subType string, name string) error {
|
||||
typedClient, dynClient, err := newClients()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns, err := getNamespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
labelSelector, err := buildSubResourceSelector(ctx, typedClient, getCmdFlags.target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch subType {
|
||||
case "secrets":
|
||||
return getFilteredSecrets(ctx, dynClient, ns, name, labelSelector)
|
||||
case "services":
|
||||
return getFilteredServices(ctx, dynClient, ns, name, labelSelector)
|
||||
case "ingresses":
|
||||
return getFilteredIngresses(ctx, dynClient, ns, name, labelSelector)
|
||||
case "workloads":
|
||||
return getFilteredWorkloads(ctx, dynClient, ns, name, labelSelector)
|
||||
default:
|
||||
return fmt.Errorf("unknown sub-resource type: %s", subType)
|
||||
}
|
||||
}
|
||||
|
||||
func buildSubResourceSelector(ctx context.Context, typedClient client.Client, target string) (string, error) {
|
||||
var selectors []string
|
||||
|
||||
if target == "" {
|
||||
selectors = append(selectors, corev1alpha1.TenantResourceLabelKey+"="+corev1alpha1.TenantResourceLabelValue)
|
||||
return strings.Join(selectors, ","), nil
|
||||
}
|
||||
|
||||
parts := strings.SplitN(target, "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", fmt.Errorf("invalid target format %q, expected type/name", target)
|
||||
}
|
||||
targetType, targetName := parts[0], parts[1]
|
||||
|
||||
// Discover ApplicationDefinitions to resolve the target type
|
||||
registry, err := discoverAppDefs(ctx, typedClient)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Check if this is a module reference
|
||||
if strings.ToLower(targetType) == "module" {
|
||||
info := registry.ResolveModule(targetName)
|
||||
if info == nil {
|
||||
return "", fmt.Errorf("unknown module %q", targetName)
|
||||
}
|
||||
selectors = append(selectors,
|
||||
appsv1alpha1.ApplicationKindLabel+"="+info.Kind,
|
||||
appsv1alpha1.ApplicationNameLabel+"="+targetName,
|
||||
corev1alpha1.TenantResourceLabelKey+"="+corev1alpha1.TenantResourceLabelValue,
|
||||
)
|
||||
return strings.Join(selectors, ","), nil
|
||||
}
|
||||
|
||||
info := registry.Resolve(targetType)
|
||||
if info == nil {
|
||||
return "", fmt.Errorf("unknown application type %q", targetType)
|
||||
}
|
||||
|
||||
selectors = append(selectors,
|
||||
appsv1alpha1.ApplicationKindLabel+"="+info.Kind,
|
||||
appsv1alpha1.ApplicationNameLabel+"="+targetName,
|
||||
corev1alpha1.TenantResourceLabelKey+"="+corev1alpha1.TenantResourceLabelValue,
|
||||
)
|
||||
return strings.Join(selectors, ","), nil
|
||||
}
|
||||
|
||||
func getFilteredSecrets(ctx context.Context, dynClient dynamic.Interface, ns, name, labelSelector string) error {
|
||||
gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
|
||||
return getFilteredResources(ctx, dynClient, gvr, ns, name, labelSelector, "secrets", printSecrets)
|
||||
}
|
||||
|
||||
func getFilteredServices(ctx context.Context, dynClient dynamic.Interface, ns, name, labelSelector string) error {
|
||||
gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}
|
||||
return getFilteredResources(ctx, dynClient, gvr, ns, name, labelSelector, "services", printServices)
|
||||
}
|
||||
|
||||
func getFilteredIngresses(ctx context.Context, dynClient dynamic.Interface, ns, name, labelSelector string) error {
|
||||
gvr := schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}
|
||||
return getFilteredResources(ctx, dynClient, gvr, ns, name, labelSelector, "ingresses", printIngresses)
|
||||
}
|
||||
|
||||
func getFilteredWorkloads(ctx context.Context, dynClient dynamic.Interface, ns, name, labelSelector string) error {
|
||||
gvr := schema.GroupVersionResource{Group: "cozystack.io", Version: "v1alpha1", Resource: "workloadmonitors"}
|
||||
return getFilteredResources(ctx, dynClient, gvr, ns, name, labelSelector, "workloads", printWorkloads)
|
||||
}
|
||||
|
||||
func getFilteredResources(
|
||||
ctx context.Context,
|
||||
dynClient dynamic.Interface,
|
||||
gvr schema.GroupVersionResource,
|
||||
ns, name, labelSelector string,
|
||||
typeName string,
|
||||
printer func([]unstructured.Unstructured),
|
||||
) error {
|
||||
if name != "" {
|
||||
item, err := dynClient.Resource(gvr).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get %s %q: %w", typeName, name, err)
|
||||
}
|
||||
printer([]unstructured.Unstructured{*item})
|
||||
return nil
|
||||
}
|
||||
|
||||
list, err := dynClient.Resource(gvr).Namespace(ns).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: labelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list %s: %w", typeName, err)
|
||||
}
|
||||
if len(list.Items) == 0 {
|
||||
printNoResources(os.Stderr, typeName)
|
||||
return nil
|
||||
}
|
||||
printer(list.Items)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getApplications(ctx context.Context, resourceType, name string) error {
|
||||
typedClient, dynClient, err := newClients()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns, err := getNamespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
registry, err := discoverAppDefs(ctx, typedClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info := registry.Resolve(resourceType)
|
||||
if info == nil {
|
||||
return fmt.Errorf("unknown resource type %q\nUse 'cozyctl get --help' for available types", resourceType)
|
||||
}
|
||||
|
||||
gvr := schema.GroupVersionResource{Group: "apps.cozystack.io", Version: "v1alpha1", Resource: info.Plural}
|
||||
|
||||
if name != "" {
|
||||
item, err := dynClient.Resource(gvr).Namespace(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get %s %q: %w", info.Singular, name, err)
|
||||
}
|
||||
printApplications([]unstructured.Unstructured{*item})
|
||||
return nil
|
||||
}
|
||||
|
||||
list, err := dynClient.Resource(gvr).Namespace(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list %s: %w", info.Plural, err)
|
||||
}
|
||||
if len(list.Items) == 0 {
|
||||
printNoResources(os.Stderr, info.Plural)
|
||||
return nil
|
||||
}
|
||||
printApplications(list.Items)
|
||||
return nil
|
||||
}
|
||||
43
cmd/cozyctl/cmd/migrate.go
Normal file
43
cmd/cozyctl/cmd/migrate.go
Normal file
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var migrateCmd = &cobra.Command{
|
||||
Use: "migrate <type> <name>",
|
||||
Short: "Live-migrate a VirtualMachine to another node",
|
||||
Long: `Live-migrate a VirtualMachine to another node using virtctl. Only valid for VirtualMachine or VMInstance kinds.`,
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: runMigrate,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(migrateCmd)
|
||||
}
|
||||
|
||||
func runMigrate(cmd *cobra.Command, args []string) error {
|
||||
vmName, ns, err := resolveVMArgs(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
virtctlArgs := []string{"virtctl", "migrate", vmName, "-n", ns}
|
||||
return execVirtctl(virtctlArgs)
|
||||
}
|
||||
51
cmd/cozyctl/cmd/portforward.go
Normal file
51
cmd/cozyctl/cmd/portforward.go
Normal file
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var portForwardCmd = &cobra.Command{
|
||||
Use: "port-forward <type/name> [ports...]",
|
||||
Short: "Forward ports to a VirtualMachineInstance",
|
||||
Long: `Forward ports to a VirtualMachineInstance using virtctl. Only valid for VirtualMachine or VMInstance kinds.`,
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
RunE: runPortForward,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(portForwardCmd)
|
||||
}
|
||||
|
||||
func runPortForward(cmd *cobra.Command, args []string) error {
|
||||
vmName, ns, err := resolveVMArgs(args[:1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ports := args[1:]
|
||||
if len(ports) == 0 {
|
||||
return fmt.Errorf("at least one port is required")
|
||||
}
|
||||
|
||||
virtctlArgs := []string{"virtctl", "port-forward", "vmi/" + vmName, "-n", ns}
|
||||
virtctlArgs = append(virtctlArgs, ports...)
|
||||
return execVirtctl(virtctlArgs)
|
||||
}
|
||||
250
cmd/cozyctl/cmd/printer.go
Normal file
250
cmd/cozyctl/cmd/printer.go
Normal file
@@ -0,0 +1,250 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
)
|
||||
|
||||
func newTabWriter() *tabwriter.Writer {
|
||||
return tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
|
||||
}
|
||||
|
||||
func printApplications(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME\tVERSION\tREADY\tSTATUS")
|
||||
for _, item := range items {
|
||||
name := item.GetName()
|
||||
version, _, _ := unstructured.NestedString(item.Object, "appVersion")
|
||||
ready, status := extractCondition(item)
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", name, version, ready, truncate(status, 48))
|
||||
}
|
||||
}
|
||||
|
||||
func printNamespaces(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME")
|
||||
for _, item := range items {
|
||||
fmt.Fprintln(w, item.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
func printModules(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME\tVERSION\tREADY\tSTATUS")
|
||||
for _, item := range items {
|
||||
name := item.GetName()
|
||||
version, _, _ := unstructured.NestedString(item.Object, "appVersion")
|
||||
ready, status := extractCondition(item)
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", name, version, ready, truncate(status, 48))
|
||||
}
|
||||
}
|
||||
|
||||
func printPVCs(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME\tSTATUS\tVOLUME\tCAPACITY\tSTORAGECLASS")
|
||||
for _, item := range items {
|
||||
name := item.GetName()
|
||||
phase, _, _ := unstructured.NestedString(item.Object, "status", "phase")
|
||||
volume, _, _ := unstructured.NestedString(item.Object, "spec", "volumeName")
|
||||
capacity := ""
|
||||
if cap, ok, _ := unstructured.NestedStringMap(item.Object, "status", "capacity"); ok {
|
||||
capacity = cap["storage"]
|
||||
}
|
||||
sc, _, _ := unstructured.NestedString(item.Object, "spec", "storageClassName")
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", name, phase, volume, capacity, sc)
|
||||
}
|
||||
}
|
||||
|
||||
func printSecrets(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME\tTYPE\tDATA")
|
||||
for _, item := range items {
|
||||
name := item.GetName()
|
||||
secretType, _, _ := unstructured.NestedString(item.Object, "type")
|
||||
data, _, _ := unstructured.NestedMap(item.Object, "data")
|
||||
fmt.Fprintf(w, "%s\t%s\t%d\n", name, secretType, len(data))
|
||||
}
|
||||
}
|
||||
|
||||
func printServices(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME\tTYPE\tCLUSTER-IP\tEXTERNAL-IP\tPORTS")
|
||||
for _, item := range items {
|
||||
name := item.GetName()
|
||||
svcType, _, _ := unstructured.NestedString(item.Object, "spec", "type")
|
||||
clusterIP, _, _ := unstructured.NestedString(item.Object, "spec", "clusterIP")
|
||||
|
||||
externalIP := "<none>"
|
||||
if lbIngress, ok, _ := unstructured.NestedSlice(item.Object, "status", "loadBalancer", "ingress"); ok && len(lbIngress) > 0 {
|
||||
var ips []string
|
||||
for _, ingress := range lbIngress {
|
||||
if m, ok := ingress.(map[string]interface{}); ok {
|
||||
if ip, ok := m["ip"].(string); ok {
|
||||
ips = append(ips, ip)
|
||||
} else if hostname, ok := m["hostname"].(string); ok {
|
||||
ips = append(ips, hostname)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(ips) > 0 {
|
||||
externalIP = strings.Join(ips, ",")
|
||||
}
|
||||
}
|
||||
|
||||
ports := formatPorts(item)
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", name, svcType, clusterIP, externalIP, ports)
|
||||
}
|
||||
}
|
||||
|
||||
func printIngresses(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME\tCLASS\tHOSTS\tADDRESS")
|
||||
for _, item := range items {
|
||||
name := item.GetName()
|
||||
class, _, _ := unstructured.NestedString(item.Object, "spec", "ingressClassName")
|
||||
|
||||
var hosts []string
|
||||
if rules, ok, _ := unstructured.NestedSlice(item.Object, "spec", "rules"); ok {
|
||||
for _, rule := range rules {
|
||||
if m, ok := rule.(map[string]interface{}); ok {
|
||||
if host, ok := m["host"].(string); ok {
|
||||
hosts = append(hosts, host)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
hostsStr := "<none>"
|
||||
if len(hosts) > 0 {
|
||||
hostsStr = strings.Join(hosts, ",")
|
||||
}
|
||||
|
||||
address := ""
|
||||
if lbIngress, ok, _ := unstructured.NestedSlice(item.Object, "status", "loadBalancer", "ingress"); ok && len(lbIngress) > 0 {
|
||||
var addrs []string
|
||||
for _, ingress := range lbIngress {
|
||||
if m, ok := ingress.(map[string]interface{}); ok {
|
||||
if ip, ok := m["ip"].(string); ok {
|
||||
addrs = append(addrs, ip)
|
||||
} else if hostname, ok := m["hostname"].(string); ok {
|
||||
addrs = append(addrs, hostname)
|
||||
}
|
||||
}
|
||||
}
|
||||
address = strings.Join(addrs, ",")
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", name, class, hostsStr, address)
|
||||
}
|
||||
}
|
||||
|
||||
func printWorkloads(items []unstructured.Unstructured) {
|
||||
w := newTabWriter()
|
||||
defer w.Flush()
|
||||
|
||||
fmt.Fprintln(w, "NAME\tKIND\tTYPE\tVERSION\tAVAILABLE\tOBSERVED\tOPERATIONAL")
|
||||
for _, item := range items {
|
||||
name := item.GetName()
|
||||
kind, _, _ := unstructured.NestedString(item.Object, "spec", "kind")
|
||||
wType, _, _ := unstructured.NestedString(item.Object, "spec", "type")
|
||||
version, _, _ := unstructured.NestedString(item.Object, "spec", "version")
|
||||
available, _, _ := unstructured.NestedInt64(item.Object, "status", "availableReplicas")
|
||||
observed, _, _ := unstructured.NestedInt64(item.Object, "status", "observedReplicas")
|
||||
operational, ok, _ := unstructured.NestedBool(item.Object, "status", "operational")
|
||||
opStr := ""
|
||||
if ok {
|
||||
opStr = fmt.Sprintf("%t", operational)
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%d\t%s\n", name, kind, wType, version, available, observed, opStr)
|
||||
}
|
||||
}
|
||||
|
||||
// printNoResources writes a kubectl-style "No <type> found" message to out.
func printNoResources(out io.Writer, resourceType string) {
	fmt.Fprintf(out, "No %s found\n", resourceType)
}
|
||||
|
||||
func extractCondition(item unstructured.Unstructured) (string, string) {
|
||||
conditions, ok, _ := unstructured.NestedSlice(item.Object, "status", "conditions")
|
||||
if !ok {
|
||||
return "Unknown", ""
|
||||
}
|
||||
for _, c := range conditions {
|
||||
cond, ok := c.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if cond["type"] == "Ready" {
|
||||
ready, _ := cond["status"].(string)
|
||||
message, _ := cond["message"].(string)
|
||||
return ready, message
|
||||
}
|
||||
}
|
||||
return "Unknown", ""
|
||||
}
|
||||
|
||||
// truncate shortens s to at most maxLen characters, replacing the removed
// tail with "..." when there is room for the marker.
//
// Note: operates on bytes, not runes; truncating multi-byte UTF-8 text may
// split a character.
func truncate(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen <= 0 {
		return ""
	}
	if maxLen < 3 {
		// No room for the "..." marker; hard-cut instead of panicking
		// on the negative slice index s[:maxLen-3].
		return s[:maxLen]
	}
	return s[:maxLen-3] + "..."
}
|
||||
|
||||
func formatPorts(item unstructured.Unstructured) string {
|
||||
ports, ok, _ := unstructured.NestedSlice(item.Object, "spec", "ports")
|
||||
if !ok || len(ports) == 0 {
|
||||
return "<none>"
|
||||
}
|
||||
var parts []string
|
||||
for _, p := range ports {
|
||||
port, ok := p.(map[string]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
portNum, _, _ := unstructured.NestedInt64(port, "port")
|
||||
protocol, _, _ := unstructured.NestedString(port, "protocol")
|
||||
if protocol == "" {
|
||||
protocol = "TCP"
|
||||
}
|
||||
nodePort, _, _ := unstructured.NestedInt64(port, "nodePort")
|
||||
if nodePort > 0 {
|
||||
parts = append(parts, fmt.Sprintf("%d:%d/%s", portNum, nodePort, protocol))
|
||||
} else {
|
||||
parts = append(parts, fmt.Sprintf("%d/%s", portNum, protocol))
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ",")
|
||||
}
|
||||
57
cmd/cozyctl/cmd/root.go
Normal file
57
cmd/cozyctl/cmd/root.go
Normal file
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Version is the cozyctl release version reported by --version.
// It defaults to "dev" and is overridden at build time via
// -ldflags "-X github.com/cozystack/cozystack/cmd/cozyctl/cmd.Version=vX.Y.Z".
var Version = "dev"
|
||||
|
||||
// globalFlags holds the persistent CLI flags shared by every cozyctl
// subcommand; they are registered on rootCmd in init.
var globalFlags struct {
	kubeconfig string // --kubeconfig: path to the kubeconfig file
	context    string // --context: Kubernetes context to use
	namespace  string // -n/--namespace: target Kubernetes namespace
}
|
||||
|
||||
// rootCmd is the base "cozyctl" command; subcommands attach themselves via
// rootCmd.AddCommand in their own init functions. Errors and usage output
// are silenced here so that Execute prints each error exactly once.
var rootCmd = &cobra.Command{
	Use:               "cozyctl",
	Short:             "A CLI for managing Cozystack applications",
	SilenceErrors:     true,
	SilenceUsage:      true,
	DisableAutoGenTag: true,
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
func Execute() error {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err.Error())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// init wires the build-time version string and the persistent connection
// flags (--kubeconfig, --context, -n/--namespace) onto the root command.
func init() {
	rootCmd.Version = Version
	rootCmd.PersistentFlags().StringVar(&globalFlags.kubeconfig, "kubeconfig", "", "Path to kubeconfig file")
	rootCmd.PersistentFlags().StringVar(&globalFlags.context, "context", "", "Kubernetes context to use")
	rootCmd.PersistentFlags().StringVarP(&globalFlags.namespace, "namespace", "n", "", "Kubernetes namespace")
}
|
||||
106
cmd/cozyctl/cmd/vm.go
Normal file
106
cmd/cozyctl/cmd/vm.go
Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// vmKindPrefix maps an application Kind to the release-name prefix used by
// the underlying KubeVirt VM objects. The second result is false for kinds
// that are not VM-backed.
func vmKindPrefix(kind string) (string, bool) {
	prefixes := map[string]string{
		"VirtualMachine": "virtual-machine",
		"VMInstance":     "vm-instance",
	}
	prefix, ok := prefixes[kind]
	return prefix, ok
}
|
||||
|
||||
// resolveVMArgs takes CLI args (type, name or type/name), resolves the application type
|
||||
// via discovery, validates it's a VM kind, and returns the full VM name and namespace.
|
||||
func resolveVMArgs(args []string) (string, string, error) {
|
||||
var resourceType, resourceName string
|
||||
|
||||
if len(args) == 1 {
|
||||
// type/name format
|
||||
parts := strings.SplitN(args[0], "/", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("expected type/name format, got %q", args[0])
|
||||
}
|
||||
resourceType, resourceName = parts[0], parts[1]
|
||||
} else {
|
||||
resourceType = args[0]
|
||||
resourceName = args[1]
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
typedClient, _, err := newClients()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
registry, err := discoverAppDefs(ctx, typedClient)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
info := registry.Resolve(resourceType)
|
||||
if info == nil {
|
||||
return "", "", fmt.Errorf("unknown application type %q", resourceType)
|
||||
}
|
||||
|
||||
prefix, ok := vmKindPrefix(info.Kind)
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("resource type %q (Kind=%s) is not a VirtualMachine or VMInstance", resourceType, info.Kind)
|
||||
}
|
||||
|
||||
ns, err := getNamespace()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
vmName := prefix + "-" + resourceName
|
||||
return vmName, ns, nil
|
||||
}
|
||||
|
||||
// execVirtctl replaces the current process with virtctl.
|
||||
func execVirtctl(args []string) error {
|
||||
virtctlPath, err := exec.LookPath("virtctl")
|
||||
if err != nil {
|
||||
return fmt.Errorf("virtctl not found in PATH: %w", err)
|
||||
}
|
||||
|
||||
// Append kubeconfig/context flags if set
|
||||
if globalFlags.kubeconfig != "" {
|
||||
args = append(args, "--kubeconfig", globalFlags.kubeconfig)
|
||||
}
|
||||
if globalFlags.context != "" {
|
||||
args = append(args, "--context", globalFlags.context)
|
||||
}
|
||||
|
||||
if err := syscall.Exec(virtctlPath, args, os.Environ()); err != nil {
|
||||
return fmt.Errorf("failed to exec virtctl: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
43
cmd/cozyctl/cmd/vnc.go
Normal file
43
cmd/cozyctl/cmd/vnc.go
Normal file
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// vncCmd implements "cozyctl vnc <type> <name>", which resolves the target
// application to its KubeVirt VM and shells out to virtctl for the VNC
// session.
//
// NOTE(review): resolveVMArgs also supports a single "type/name" argument,
// but ExactArgs(2) makes that form unreachable from this command — confirm
// whether RangeArgs(1, 2) was intended.
var vncCmd = &cobra.Command{
	Use:   "vnc <type> <name>",
	Short: "Open a VNC connection to a VirtualMachine",
	Long:  `Open a VNC connection to a VirtualMachine using virtctl. Only valid for VirtualMachine or VMInstance kinds.`,
	Args:  cobra.ExactArgs(2),
	RunE:  runVNC,
}
|
||||
|
||||
// init registers the vnc subcommand on the root command.
func init() {
	rootCmd.AddCommand(vncCmd)
}
|
||||
|
||||
func runVNC(cmd *cobra.Command, args []string) error {
|
||||
vmName, ns, err := resolveVMArgs(args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
virtctlArgs := []string{"virtctl", "vnc", vmName, "-n", ns}
|
||||
return execVirtctl(virtctlArgs)
|
||||
}
|
||||
29
cmd/cozyctl/main.go
Normal file
29
cmd/cozyctl/main.go
Normal file
@@ -0,0 +1,29 @@
|
||||
/*
|
||||
Copyright 2025 The Cozystack Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/cozystack/cozystack/cmd/cozyctl/cmd"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := cmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.40.5
|
||||
-->
|
||||
|
||||
## Improvements
|
||||
|
||||
* **[dashboard] Improve dashboard session params**: Improved session parameter handling in the dashboard for better user experience and more reliable session management ([**@lllamnyp**](https://github.com/lllamnyp) in #1913, #1919).
|
||||
|
||||
## Dependencies
|
||||
|
||||
* **Update cozyhr to v1.6.1**: Updated cozyhr to v1.6.1, which fixes a critical bug causing helm-controller v0.37.0+ to unexpectedly uninstall HelmReleases after cozyhr apply by correcting history snapshot fields for helm-controller compatibility ([**@kvaps**](https://github.com/kvaps) in cozystack/cozyhr#10).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.40.4...v0.40.5](https://github.com/cozystack/cozystack/compare/v0.40.4...v0.40.5)
|
||||
@@ -1,11 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.40.6
|
||||
-->
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[kubernetes] Fix manifests for kubernetes deployment**: Fixed incorrect manifests that prevented proper Kubernetes deployment, restoring correct application behavior ([**@IvanHunters**](https://github.com/IvanHunters) in #1943, #1944).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.40.5...v0.40.6](https://github.com/cozystack/cozystack/compare/v0.40.5...v0.40.6)
|
||||
@@ -1,11 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.40.7
|
||||
-->
|
||||
|
||||
## Security
|
||||
|
||||
* **[dashboard] Verify JWT token**: Added JWT token verification to the dashboard, ensuring that authentication tokens are properly validated before granting access. This prevents unauthorized access through forged or expired tokens ([**@lllamnyp**](https://github.com/lllamnyp) in #1980, #1984).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.40.6...v0.40.7](https://github.com/cozystack/cozystack/compare/v0.40.6...v0.40.7)
|
||||
@@ -1,11 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.41.4
|
||||
-->
|
||||
|
||||
## Dependencies
|
||||
|
||||
* **Update cozyhr to v1.6.1**: Updated cozyhr to v1.6.1, which fixes a critical bug causing helm-controller v0.37.0+ to unexpectedly uninstall HelmReleases after cozyhr apply by correcting history snapshot fields for helm-controller compatibility ([**@kvaps**](https://github.com/kvaps) in cozystack/cozyhr#10).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.41.3...v0.41.4](https://github.com/cozystack/cozystack/compare/v0.41.3...v0.41.4)
|
||||
@@ -1,21 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.41.5
|
||||
-->
|
||||
|
||||
## Features and Improvements
|
||||
|
||||
* **[dashboard] Add "Edit" button to all resources**: Added an "Edit" button across all resource views in the dashboard, allowing users to modify resource configurations directly from the UI ([**@sircthulhu**](https://github.com/sircthulhu) in #1928, #1931).
|
||||
|
||||
* **[dashboard] Add resource quota usage to tenant details page**: Added resource quota usage display to the tenant details page, giving administrators visibility into how much of allocated resources each tenant is consuming ([**@sircthulhu**](https://github.com/sircthulhu) in #1929, #1932).
|
||||
|
||||
* **[branding] Separate values for keycloak**: Separated Keycloak branding values into dedicated configuration, allowing more granular customization of Keycloak appearance without affecting other branding settings ([**@nbykov0**](https://github.com/nbykov0) in #1946).
|
||||
|
||||
* **Add instance profile label to workload monitor**: Added instance profile metadata labels to the workload monitor, enabling better resource tracking and monitoring by instance profile type ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1954, #1957).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[kubernetes] Fix manifests for kubernetes deployment**: Fixed incorrect manifests that prevented proper Kubernetes deployment, restoring correct application behavior ([**@IvanHunters**](https://github.com/IvanHunters) in #1943, #1945).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.41.4...v0.41.5](https://github.com/cozystack/cozystack/compare/v0.41.4...v0.41.5)
|
||||
@@ -1,17 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.41.6
|
||||
-->
|
||||
|
||||
## Improvements
|
||||
|
||||
* **[vm] Allow changing field external after creation**: Users can now modify the external network field on virtual machines after initial creation, providing more flexibility in VM networking configuration without requiring recreation ([**@sircthulhu**](https://github.com/sircthulhu) in #1956, #1962).
|
||||
|
||||
* **[branding] Separate values for keycloak**: Separated Keycloak branding values into dedicated configuration for more granular customization of Keycloak appearance ([**@nbykov0**](https://github.com/nbykov0) in #1947, #1963).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[kubernetes] Fix coredns serviceaccount to match kubernetes bootstrap RBAC**: Configured the CoreDNS chart to create a `kube-dns` ServiceAccount matching the Kubernetes bootstrap ClusterRoleBinding, fixing RBAC errors (`Failed to watch`) when CoreDNS pods restart ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1958, #1978).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.41.5...v0.41.6](https://github.com/cozystack/cozystack/compare/v0.41.5...v0.41.6)
|
||||
@@ -1,15 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.41.7
|
||||
-->
|
||||
|
||||
## Security
|
||||
|
||||
* **[dashboard] Verify JWT token**: Added JWT token verification to the dashboard, ensuring that authentication tokens are properly validated before granting access. This prevents unauthorized access through forged or expired tokens ([**@lllamnyp**](https://github.com/lllamnyp) in #1980, #1983).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[postgres-operator] Correct PromQL syntax in CNPGClusterOffline alert**: Fixed incorrect PromQL syntax in the `CNPGClusterOffline` alert rule for CloudNativePG, ensuring the alert fires correctly when all instances of a PostgreSQL cluster are offline ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1981, #1989).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.41.6...v0.41.7](https://github.com/cozystack/cozystack/compare/v0.41.6...v0.41.7)
|
||||
@@ -1,17 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.41.8
|
||||
-->
|
||||
|
||||
## Features and Improvements
|
||||
|
||||
* **[kubernetes] Auto-enable Gateway API support in cert-manager**: cert-manager now automatically enables `enableGatewayAPI` when the Gateway API addon is active in the Kubernetes application. Users no longer need to manually configure this setting, and the option can still be overridden via `valuesOverride` ([**@kvaps**](https://github.com/kvaps) in #1997, #2012).
|
||||
|
||||
* **[vm] Allow switching between instancetype and custom resources**: Users can now switch virtual machines between instancetype-based and custom resource configurations after creation. The upgrade hook atomically patches VM resources, providing more flexibility in adjusting VM sizing without recreation ([**@sircthulhu**](https://github.com/sircthulhu) in #2008, #2013).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[dashboard] Add startupProbe to prevent container restarts on slow hardware**: Added `startupProbe` to both `bff` and `web` containers in the dashboard deployment. On slow hardware, kubelet was killing containers because the `livenessProbe` only allowed ~33 seconds for startup. The `startupProbe` gives containers up to 60 seconds to start before `livenessProbe` kicks in ([**@kvaps**](https://github.com/kvaps) in #1996, #2014).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.41.7...v0.41.8](https://github.com/cozystack/cozystack/compare/v0.41.7...v0.41.8)
|
||||
@@ -1,15 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v0.41.9
|
||||
-->
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[cozystack-basics] Deny resourcequotas deletion for tenant admin**: Prevented tenant administrators from deleting resource quotas, ensuring that resource limits set by platform administrators cannot be bypassed by tenant-level users ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2076).
|
||||
|
||||
## Dependencies
|
||||
|
||||
* **Update Kube-OVN to v1.15.3**: Updated Kube-OVN CNI to v1.15.3 with latest bug fixes and improvements ([**@kvaps**](https://github.com/kvaps)).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: [v0.41.8...v0.41.9](https://github.com/cozystack/cozystack/compare/v0.41.8...v0.41.9)
|
||||
@@ -1,65 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-rc.1
|
||||
-->
|
||||
|
||||
> **⚠️ Release Candidate Warning**: This is a release candidate intended for final validation before the stable v1.0.0 release. Breaking changes are not expected at this stage, but please test thoroughly before deploying to production.
|
||||
|
||||
## Features and Improvements
|
||||
|
||||
* **[harbor] Add managed Harbor container registry**: Added Harbor v2.14.2 as a managed tenant-level container registry service. The application uses CloudNativePG for PostgreSQL, the Redis operator for caching, and S3 via COSI BucketClaim (from SeaweedFS) for registry image storage. Auto-generated admin credentials are persisted across upgrades, TLS is handled by cert-manager, and Trivy vulnerability scanner is included. Users can now deploy a fully managed, production-ready OCI container registry within their tenant ([**@lexfrei**](https://github.com/lexfrei) in #2058).
|
||||
|
||||
* **[kubernetes] Update supported Kubernetes versions to v1.30–v1.35**: Updated the tenant Kubernetes version matrix to v1.30, v1.31, v1.32, v1.33, v1.34, and v1.35 (now the default). EOL versions v1.28 and v1.29 are removed. Kamaji is updated to edge-26.2.4 with full Kubernetes 1.35 support, and the CAPI Kamaji provider is updated to v0.16.0. A compatibility patch ensures kubelets older than v1.35 are not broken by Kamaji injecting 1.35-specific kubelet fields ([**@lexfrei**](https://github.com/lexfrei) in #2073).
|
||||
|
||||
* **[platform] Make cluster issuer name and ACME solver configurable**: Added `publishing.certificates.solver` (`http01` or `dns01`) and `publishing.certificates.issuerName` (default: `letsencrypt-prod`) parameters to the platform chart. This allows operators to point all ingress TLS annotations at any ClusterIssuer — custom ACME, self-signed, or internal CA — without modifying individual package templates. See the Breaking Changes section for the rename from the previous `issuerType` field ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2077).
|
||||
|
||||
* **[dashboard] VMInstance dropdowns for disks and instanceType**: The VM instance creation form now renders API-backed dropdowns for the `instanceType` field (populated from `VirtualMachineClusterInstancetype` cluster resources) and for disk `name` fields (populated from `VMDisk` resources in the same namespace). Default values are read from the ApplicationDefinition's OpenAPI schema. This eliminates manual lookups and reduces misconfiguration when attaching disks or selecting VM instance types ([**@sircthulhu**](https://github.com/sircthulhu) in #2071).
|
||||
|
||||
* **[installer] Remove CRDs from Helm chart, delegate lifecycle to operator**: The `cozy-installer` Helm chart no longer ships CRDs in its `crds/` directory. CRD lifecycle is now fully managed by the Cozystack operator via the `--install-crds` flag, which applies embedded CRD manifests on every startup using server-side apply. The platform PackageSource is also created by the operator instead of a Helm template. This ensures CRDs and the PackageSource are always up to date after each operator restart, eliminating stale CRDs from Helm's install-only behavior ([**@lexfrei**](https://github.com/lexfrei) in #2074).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[kubevirt] Update KubeVirt to v1.6.4 and CDI to v1.64.0, fix VM pod initialization**: Updated KubeVirt operator to v1.6.4 and CDI operator to v1.64.0, including live migration of existing VMs during the upgrade. Additionally, disabled serial console logging globally via the KubeVirt CR to prevent a known v1.6.x issue ([upstream #15989](https://github.com/kubevirt/kubevirt/issues/15989)) where the `guest-console-log` init container blocked virt-launcher pods from starting, causing all VMs to get stuck in `PodInitializing` state ([**@nbykov0**](https://github.com/nbykov0) in #1833; [**@kvaps**](https://github.com/kvaps) in 7dfb819).
|
||||
|
||||
* **[linstor] Fix DRBD+LUKS+STORAGE resource creation failure**: All newly created encrypted volumes were failing because the DRBD `.res` file was never written due to a missing `setExists(true)` call in the `LuksLayer`. Applied the upstream `skip-adjust-when-device-inaccessible` patch ([LINBIT/linstor-server#477](https://github.com/LINBIT/linstor-server/pull/477)) which fixes the root cause and also prevents unnecessary lsblk calls when devices are not yet physically present ([**@kvaps**](https://github.com/kvaps) in #2072).
|
||||
|
||||
* **[system] Fix monitoring-agents FQDN resolution for tenant workload clusters**: Monitoring agents (`vmagent`, `fluent-bit`) in tenant workload clusters were failing to deliver metrics and logs because service addresses used short DNS names without the cluster domain suffix. Fixed by appending the configured cluster domain from `_cluster.cluster-domain` (with fallback to `cluster.local`) to all vmagent remoteWrite URLs and fluent-bit output hosts ([**@IvanHunters**](https://github.com/IvanHunters) in #2075).
|
||||
|
||||
* **[cozystack-basics] Preserve existing HelmRelease values during reconciliations**: Fixed a data-loss bug where changes made to the `tenant-root` HelmRelease were silently dropped on the next forced or upgrade reconciliation of the `cozystack-basics` HelmRelease. The reconciler now merges new configuration with existing values instead of overwriting them ([**@sircthulhu**](https://github.com/sircthulhu) in #2068).
|
||||
|
||||
* **[cozystack-basics] Deny resourcequotas deletion for tenant admin**: Fixed the `cozy:tenant:admin:base` ClusterRole to explicitly deny deletion of `ResourceQuota` objects for tenant admins and superadmins, preventing accidental removal of tenant resource limits ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2076).
|
||||
|
||||
## Breaking Changes & Upgrade Notes
|
||||
|
||||
* **[platform] Certificate issuer configuration parameters renamed**: The `publishing.certificates.issuerType` field is renamed to `publishing.certificates.solver`, and the value `cloudflare` is renamed to `dns01` to align with standard ACME terminology. A new `publishing.certificates.issuerName` field (default: `letsencrypt-prod`) is introduced to allow pointing all ingresses at a custom ClusterIssuer. Migration 32 is included and automatically converts existing configurations during upgrade — no manual action is required ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2077).
|
||||
|
||||
## Documentation
|
||||
|
||||
* **[website] Migrate ConfigMap references to Platform Package in v1 documentation**: Updated the entire v1 documentation tree to replace legacy ConfigMap-based configuration references with the new Platform Package API, ensuring guides are consistent with the v1 configuration model ([**@sircthulhu**](https://github.com/sircthulhu) in cozystack/website#426).
|
||||
|
||||
* **[website] Add generic Kubernetes deployment guide for v1**: Added a new installation guide covering Cozystack deployment on any generic Kubernetes cluster, expanding the set of supported deployment targets beyond provider-specific guides ([**@lexfrei**](https://github.com/lexfrei) in cozystack/website#408).
|
||||
|
||||
* **[website] Refactor resource planning documentation**: Improved the resource planning guide with a clearer structure and more comprehensive coverage of planning considerations for Cozystack deployments ([**@IvanStukov**](https://github.com/IvanStukov) in cozystack/website#423).
|
||||
|
||||
* **[website] Add ServiceAccount API access documentation and update FAQ**: Added a new article documenting ServiceAccount API access token configuration and updated the FAQ to include related troubleshooting guidance ([**@IvanStukov**](https://github.com/IvanStukov) in cozystack/website#421).
|
||||
|
||||
* **[website] Update networking-mesh allowed-location-ips example**: Replaced provider-specific CLI usage with standard `kubectl` commands in the multi-location networking guide's `allowed-location-ips` example, making the documentation more universally applicable ([**@kvaps**](https://github.com/kvaps) in cozystack/website#425).
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to thank all contributors who made this release possible:
|
||||
|
||||
* [**@IvanHunters**](https://github.com/IvanHunters)
|
||||
* [**@IvanStukov**](https://github.com/IvanStukov)
|
||||
* [**@kvaps**](https://github.com/kvaps)
|
||||
* [**@lexfrei**](https://github.com/lexfrei)
|
||||
* [**@myasnikovdaniil**](https://github.com/myasnikovdaniil)
|
||||
* [**@nbykov0**](https://github.com/nbykov0)
|
||||
* [**@sircthulhu**](https://github.com/sircthulhu)
|
||||
|
||||
### New Contributors
|
||||
|
||||
We're excited to welcome our first-time contributors:
|
||||
|
||||
* [**@myasnikovdaniil**](https://github.com/myasnikovdaniil) - First contribution!
|
||||
|
||||
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v1.0.0-beta.6...v1.0.0-rc.1
|
||||
@@ -1,57 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-rc.2
|
||||
-->
|
||||
|
||||
> **⚠️ Release Candidate Warning**: This is a release candidate intended for final validation before the stable v1.0.0 release. Breaking changes are not expected at this stage, but please test thoroughly before deploying to production.
|
||||
|
||||
## Features and Improvements
|
||||
|
||||
* **[keycloak] Allow custom Ingress hostname via values**: Added an `ingress.host` field to the cozy-keycloak chart values, allowing operators to override the default `keycloak.<root-host>` Ingress hostname. The custom hostname is applied to both the Ingress resource and the `KC_HOSTNAME` environment variable in the StatefulSet. When left empty, the original behavior is preserved (fully backward compatible) ([**@sircthulhu**](https://github.com/sircthulhu) in #2101).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[platform] Fix upgrade issues in migrations, etcd timeout, and migration script**: Fixed multiple upgrade failures discovered during v0.41.1 → v1.0 upgrade testing. Migration 26 now uses the `cozystack.io/ui=true` label (always present on v0.41.1) instead of the new label that depends on migration 22 having run, and adds robust Helm secret deletion with fallback and verification. Migrations 28 and 29 wrap `grep` calls to prevent `pipefail` exits and fix the reconcile annotation to use RFC3339 format. Migration 27 now skips missing CRDs and adds a name-pattern fallback for Helm secret deletion. The etcd HelmRelease timeout is increased from 10m to 30m to accommodate TLS cert rotation hooks. The `migrate-to-version-1.0.sh` script gains the missing `bundle-disable`, `bundle-enable`, `expose-ingress`, and `expose-services` field mappings ([**@kvaps**](https://github.com/kvaps) in #2096).
|
||||
|
||||
* **[platform] Fix orphaned -rd HelmReleases after application renames**: After the `ferretdb→mongodb`, `mysql→mariadb`, and `virtual-machine→vm-disk+vm-instance` renames, the system-level `-rd` HelmReleases in `cozy-system` (`ferretdb-rd`, `mysql-rd`, `virtual-machine-rd`) were left orphaned, referencing ExternalArtifacts that no longer exist and causing persistent reconciliation failures. Migrations 28 and 29 are updated to remove these resources, and migration 33 is added as a safety net for clusters that already passed those migrations ([**@kvaps**](https://github.com/kvaps) in #2102).
|
||||
|
||||
* **[monitoring-agents] Fix FQDN resolution regression in tenant workload clusters**: The fix introduced in #2075 used `_cluster.cluster-domain` references in `values.yaml`, but `_cluster` values are not accessible from Helm subchart contexts — meaning fluent-bit received empty hostnames and failed to forward logs. This PR replaces the `_cluster` references with a new `global.clusterDomain` variable (empty by default for management clusters, set to the cluster domain for tenant clusters), which is correctly shared with all subcharts ([**@kvaps**](https://github.com/kvaps) in #2086).
|
||||
|
||||
* **[dashboard] Fix legacy templating and cluster identifier in sidebar links**: Standardized the cluster identifier used across dashboard menu links, administration links, and API request paths, resolving incorrect or broken link targets for the Backups and External IPs sidebar sections ([**@androndo**](https://github.com/androndo) in #2093).
|
||||
|
||||
* **[dashboard] Fix backupjobs creation form and sidebar backup category identifier**: Fixed the backup job creation form configuration, adding the required Name, Namespace, Plan Name, Application, and Backup Class fields. Fixed the sidebar backup category identifier that was causing incorrect navigation ([**@androndo**](https://github.com/androndo) in #2103).
|
||||
|
||||
## Documentation
|
||||
|
||||
* **[website] Add Helm chart development principles guide**: Added a new developer guide section documenting Cozystack's four core Helm chart principles: easy upstream updates, local-first artifacts, local dev/test workflow, and no external dependencies ([**@kvaps**](https://github.com/kvaps) in cozystack/website#418).
|
||||
|
||||
* **[website] Add network architecture overview**: Added comprehensive network architecture documentation covering the multi-layered networking stack — MetalLB (L2/BGP), Cilium eBPF (kube-proxy replacement), Kube-OVN (centralized IPAM), and tenant isolation with identity-based eBPF policies — with Mermaid diagrams for all major traffic flows ([**@IvanHunters**](https://github.com/IvanHunters) in cozystack/website#422).
|
||||
|
||||
* **[website] Update documentation to use jsonpatch for service exposure**: Improved `kubectl patch` commands throughout installation and configuration guides to use JSON Patch `add` operations for extending arrays instead of replacing them wholesale, making the documented commands safer and more precise ([**@sircthulhu**](https://github.com/sircthulhu) in cozystack/website#427).
|
||||
|
||||
* **[website] Update certificates section in Platform Package documentation**: Updated the certificate configuration documentation to reflect the new `solver` and `issuerName` fields introduced in v1.0.0-rc.1, replacing the legacy `issuerType` references ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in cozystack/website#429).
|
||||
|
||||
* **[website] Add tenant Kubernetes cluster log querying guide**: Added documentation for querying logs from tenant Kubernetes clusters in Grafana using VictoriaLogs labels (`tenant`, `kubernetes_namespace_name`, `kubernetes_pod_name`), including the `monitoringAgents` addon prerequisite and step-by-step filtering examples ([**@IvanHunters**](https://github.com/IvanHunters) in cozystack/website#430).
|
||||
|
||||
* **[website] Replace non-idempotent commands with idempotent alternatives**: Updated `helm install` to `helm upgrade --install`, `kubectl create -f` to `kubectl apply -f`, and `kubectl create ns` to the dry-run+apply pattern across all installation and deployment guides so commands can be safely re-run ([**@lexfrei**](https://github.com/lexfrei) in cozystack/website#431).
|
||||
|
||||
* **[website] Fix broken documentation links with `.md` suffix**: Fixed incorrect internal links with `.md` suffix across virtualization guides for both v0 and v1 documentation, standardizing link text to "Developer Guide" ([**@cheese**](https://github.com/cheese) in cozystack/website#432).
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to thank all contributors who made this release possible:
|
||||
|
||||
* [**@androndo**](https://github.com/androndo)
|
||||
* [**@cheese**](https://github.com/cheese)
|
||||
* [**@IvanHunters**](https://github.com/IvanHunters)
|
||||
* [**@kvaps**](https://github.com/kvaps)
|
||||
* [**@lexfrei**](https://github.com/lexfrei)
|
||||
* [**@myasnikovdaniil**](https://github.com/myasnikovdaniil)
|
||||
* [**@sircthulhu**](https://github.com/sircthulhu)
|
||||
|
||||
### New Contributors
|
||||
|
||||
We're excited to welcome our first-time contributors:
|
||||
|
||||
* [**@cheese**](https://github.com/cheese) - First contribution!
|
||||
|
||||
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v1.0.0-rc.1...v1.0.0-rc.2
|
||||
@@ -1,289 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.0
|
||||
-->
|
||||
|
||||
# Cozystack v1.0.0 — "Stable"
|
||||
|
||||
We are thrilled to announce **Cozystack v1.0.0**, the first stable major release of the Cozystack platform. This milestone represents a fundamental architectural evolution from the v0.x series, introducing a fully operator-driven package management system, a comprehensive backup and restore framework, a redesigned virtual machine architecture, and a rich set of new managed applications — all hardened through an extensive alpha, beta, and release-candidate cycle.
|
||||
|
||||
## Feature Highlights
|
||||
|
||||
### Package-Based Architecture with Cozystack Operator
|
||||
|
||||
The most significant architectural change in v1.0.0 is the replacement of HelmRelease bundle deployments with a declarative **Package** and **PackageSource** model managed by the new `cozystack-operator`. Cluster administrators now define their platform configuration in a structured `values.yaml`, and the operator reconciles the desired state by managing Package and PackageSource resources across the cluster.
|
||||
|
||||
The operator also takes ownership of CRD lifecycle — installing and updating CRDs from embedded manifests at every startup — eliminating the stale-CRD problem that affected Helm-only installations. Flux sharding has been added to distribute tenant HelmRelease reconciliation across multiple Flux controllers, providing horizontal scalability in large multi-tenant environments.
|
||||
|
||||
A migration script (`hack/migrate-to-version-1.0.sh`) is provided for upgrading existing v0.x clusters, along with 33 incremental migration steps that automate resource renaming, secret cleanup, and configuration conversion.
|
||||
|
||||
### Comprehensive Backup and Restore System
|
||||
|
||||
v1.0.0 ships a fully featured, production-ready backup and restore framework built on Velero integration. Users can define **BackupClass** resources to describe backup storage targets, create **BackupPlan** schedules, and trigger **RestoreJob** resources for end-to-end application recovery.
|
||||
|
||||
Virtual machine backups are supported natively via the Velero KubeVirt plugin, which captures consistent VM disk snapshots alongside metadata. The backup controller and the backup strategy sub-controllers (including the VM-specific strategy) are installed by default, and a full dashboard UI allows users to monitor backup status, view backup job history, and initiate restore workflows.
|
||||
|
||||
### Redesigned Virtual Machine Architecture
|
||||
|
||||
The legacy `virtual-machine` application has been replaced with a two-resource architecture: **`vm-disk`** for managing persistent disks and **`vm-instance`** for managing VM lifecycle. This separation provides cleaner disk/instance management, allows disks to be reused across VM instances, and aligns with modern KubeVirt patterns.
|
||||
|
||||
New capabilities include: a `cpuModel` field for direct CPU model specification without using an instanceType; the ability to switch between `instanceType`-based and custom resource-based configurations; migration from the deprecated `running` field to `runStrategy`; and native **RWX (NFS) filesystem support** in the KubeVirt CSI driver, enabling multiple pods to mount the same persistent volume simultaneously.
|
||||
|
||||
### New Managed Applications
|
||||
|
||||
v1.0.0 expands the application catalog significantly:
|
||||
|
||||
- **MongoDB**: A fully managed MongoDB replica set with persistent storage, monitoring integration, and unified user/database configuration API.
|
||||
- **Qdrant**: A high-performance vector database for AI and machine learning workloads, supporting single-replica and clustered modes with API key authentication and optional external LoadBalancer access.
|
||||
- **Harbor**: A fully managed OCI container registry backed by CloudNativePG, Redis operator, and COSI BucketClaim (SeaweedFS). Includes Trivy vulnerability scanner, auto-generated admin credentials, and TLS via cert-manager.
|
||||
- **NATS**: Enhanced with full Grafana monitoring dashboards for JetStream and server metrics, Prometheus support with TLS-aware configuration, and updated image customization options.
|
||||
- **MariaDB**: The `mysql` application is renamed to `mariadb`, accurately reflecting the underlying engine. An automatic migration (migration 27) converts all existing MySQL resources to use the `mariadb` naming.
|
||||
|
||||
FerretDB has been removed from the catalog as it is superseded by native MongoDB support.
|
||||
|
||||
### Multi-Location Networking with Kilo and cilium-kilo
|
||||
|
||||
Cozystack v1.0.0 introduces first-class support for multi-location clusters via the **Kilo** WireGuard mesh networking package. Kilo automatically establishes encrypted WireGuard tunnels between nodes in different network segments, enabling seamless cross-region communication.
|
||||
|
||||
A new integrated **`cilium-kilo`** networking variant combines Cilium eBPF CNI with Kilo's WireGuard overlay in a single platform configuration selection. This variant enables `enable-ipip-termination` in Cilium and deploys Kilo with `--compatibility=cilium`, allowing Cilium network policies to function correctly over the WireGuard mesh — without any manual configuration of the two components.
|
||||
|
||||
### Flux Sharding for Scalable Multi-Tenancy
|
||||
|
||||
Tenant HelmRelease reconciliation is now distributed across multiple Flux controllers via sharding labels. Each tenant workload is assigned to a shard based on a deterministic hash, preventing a single Flux controller from becoming a bottleneck in large multi-tenant environments. The platform operator manages the shard assignment automatically, and new shards can be added by scaling the Flux deployment.
|
||||
|
||||
## Major Features and Improvements
|
||||
|
||||
### Cozystack Operator
|
||||
|
||||
* **[cozystack-operator] Introduce Package and PackageSource APIs**: Added new CRDs for declarative package management, defining the full API for Package and PackageSource resources ([**@kvaps**](https://github.com/kvaps) in #1740, #1741, #1755, #1756, #1760, #1761).
|
||||
* **[platform] Migrate from HelmRelease bundles to Package-based deployment**: Replaced HelmRelease bundle system with Package resources managed by cozystack-operator, including restructured values.yaml with full configuration support for networking, publishing, authentication, scheduling, branding, and resources ([**@kvaps**](https://github.com/kvaps) in #1816).
|
||||
* **[cozystack-operator] Add automatic CRD installation at startup**: Added `--install-crds` flag to install embedded CRD manifests on every startup via server-side apply, ensuring CRDs and the PackageSource are always up to date ([**@lexfrei**](https://github.com/lexfrei) in #2060).
|
||||
* **[installer] Remove CRDs from Helm chart, delegate lifecycle to operator**: The `cozy-installer` Helm chart no longer ships CRDs; CRD lifecycle is fully managed by the Cozystack operator ([**@lexfrei**](https://github.com/lexfrei) in #2074).
|
||||
* **[cozystack-operator] Preserve existing suspend field in package reconciler**: Fixed package reconciler to properly preserve the suspend field state during reconciliation ([**@sircthulhu**](https://github.com/sircthulhu) in #2043).
|
||||
* **[cozystack-operator] Fix namespace privileged flag resolution and field ownership**: Fixed operator to correctly check all Packages in a namespace when determining privileged status, and resolved SSA field ownership conflicts ([**@kvaps**](https://github.com/kvaps) in #2046).
|
||||
* **[platform] Add flux-plunger controller**: Added flux-plunger controller to automatically fix stuck HelmRelease errors by cleaning up failed resources and retrying reconciliation ([**@kvaps**](https://github.com/kvaps) in #1843).
|
||||
* **[installer] Add variant-aware templates for generic Kubernetes support**: Extended the installer to support generic and hosted Kubernetes deployments via the `cozystackOperator.variant=generic` parameter ([**@lexfrei**](https://github.com/lexfrei) in #2010).
|
||||
* **[installer] Unify operator templates**: Merged separate operator templates into a single variant-based template supporting Talos and non-Talos deployments ([**@kvaps**](https://github.com/kvaps) in #2034).
|
||||
|
||||
### API and Platform
|
||||
|
||||
* **[api] Rename CozystackResourceDefinition to ApplicationDefinition**: Renamed CRD and all related types for clarity and consistency, with migration 24 handling the transition automatically ([**@kvaps**](https://github.com/kvaps) in #1864).
|
||||
* **[platform] Add DNS-1035 validation for Application names**: Added dynamic DNS-1035 label validation for Application names at creation time, preventing resources with invalid names that would fail downstream ([**@lexfrei**](https://github.com/lexfrei) in #1771).
|
||||
* **[platform] Make cluster issuer name and ACME solver configurable**: Added `publishing.certificates.solver` and `publishing.certificates.issuerName` parameters to allow pointing all ingress TLS annotations at any ClusterIssuer ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2077).
|
||||
* **[platform] Add cilium-kilo networking variant**: Added integrated `cilium-kilo` networking variant combining Cilium CNI with Kilo WireGuard mesh overlay ([**@kvaps**](https://github.com/kvaps) in #2064).
|
||||
* **[cozystack-api] Switch from DaemonSet to Deployment**: Migrated cozystack-api to a Deployment with PreferClose topology spread constraints, reducing resource consumption while maintaining high availability ([**@kvaps**](https://github.com/kvaps) in #2041, #2048).
|
||||
|
||||
### Virtual Machines
|
||||
|
||||
* **[vm-instance] Complete migration from virtual-machine to vm-disk and vm-instance**: Fully migrated from `virtual-machine` to the new `vm-disk` and `vm-instance` architecture, with automatic migration script (migration 28) for existing VMs ([**@kvaps**](https://github.com/kvaps) in #2040).
|
||||
* **[kubevirt-csi-driver] Add RWX Filesystem (NFS) support**: Added Read-Write-Many filesystem support to kubevirt-csi-driver via automatic NFS server deployment per PVC ([**@kvaps**](https://github.com/kvaps) in #2042).
|
||||
* **[vm] Add cpuModel field to specify CPU model without instanceType**: Added cpuModel field to VirtualMachine API for granular CPU control ([**@sircthulhu**](https://github.com/sircthulhu) in #2007).
|
||||
* **[vm] Allow switching between instancetype and custom resources**: Implemented atomic upgrade hook for switching between instanceType-based and custom resource VM configurations ([**@sircthulhu**](https://github.com/sircthulhu) in #2008).
|
||||
* **[vm] Migrate to runStrategy instead of running**: Migrated VirtualMachine API from deprecated `running` field to `runStrategy` ([**@sircthulhu**](https://github.com/sircthulhu) in #2004).
|
||||
* **[vm] Always expose VMs with a service**: Virtual machines are now always exposed with at least a ClusterIP service, ensuring every VM has a stable in-cluster DNS name ([**@lllamnyp**](https://github.com/lllamnyp) in #1738, #1751).
|
||||
* **[dashboard] VMInstance dropdowns for disks and instanceType**: VM instance creation form now renders API-backed dropdowns for `instanceType` and disk `name` fields ([**@sircthulhu**](https://github.com/sircthulhu) in #2071).
|
||||
|
||||
### Backup System
|
||||
|
||||
* **[backups] Implement comprehensive backup and restore functionality**: Core backup Plan controller, Velero strategy controller, RestoreJob resource with end-to-end restore workflows, and enhanced backup plans UI ([**@lllamnyp**](https://github.com/lllamnyp) in #1640, #1685, #1687, #1719, #1720, #1737, #1967; [**@androndo**](https://github.com/androndo) in #1762, #1967, #1968, #1811).
|
||||
* **[backups] Add kubevirt plugin to velero**: Added KubeVirt plugin to Velero for consistent VM state and data snapshots ([**@lllamnyp**](https://github.com/lllamnyp) in #2017).
|
||||
* **[backups] Install backupstrategy controller by default**: Enabled backupstrategy controller by default for automatic backup scheduling ([**@lllamnyp**](https://github.com/lllamnyp) in #2020).
|
||||
* **[backups] Better selectors for VM strategy**: Improved VM backup strategy selectors for accurate and reliable backup targeting ([**@lllamnyp**](https://github.com/lllamnyp) in #2023).
|
||||
* **[backups] Create RBAC for backup resources**: Added comprehensive RBAC configuration for backup operations and restore jobs ([**@lllamnyp**](https://github.com/lllamnyp) in #2018).
|
||||
|
||||
### Networking
|
||||
|
||||
* **[kilo] Introduce Kilo WireGuard mesh networking**: Added Kilo as a system package providing secure WireGuard-based VPN mesh for connecting Kubernetes nodes across different networks and regions ([**@kvaps**](https://github.com/kvaps) in #1691).
|
||||
* **[kilo] Add Cilium compatibility variant**: Added `cilium` variant enabling Cilium-aware IPIP encapsulation for full network policy enforcement with Kilo mesh ([**@kvaps**](https://github.com/kvaps) in #2055).
|
||||
* **[kilo] Update to v0.8.0 with configurable MTU**: Updated Kilo to v0.8.0 with configurable MTU parameter and performance improvements ([**@kvaps**](https://github.com/kvaps) in #2003, #2049, #2053).
|
||||
* **[local-ccm] Add local-ccm package**: Added local cloud controller manager for managing load balancer services in bare-metal environments ([**@kvaps**](https://github.com/kvaps) in #1831).
|
||||
* **[local-ccm] Add node-lifecycle-controller component**: Added optional node-lifecycle-controller that automatically deletes unreachable NotReady nodes, solving the "zombie" node problem in autoscaled clusters ([**@IvanHunters**](https://github.com/IvanHunters) in #1992).
|
||||
* **[tenant] Allow egress to parent ingress pods**: Updated tenant network policies to allow egress traffic to parent cluster ingress pods ([**@lexfrei**](https://github.com/lexfrei) in #1765, #1776).
|
||||
|
||||
### New Applications
|
||||
|
||||
* **[mongodb] Add MongoDB managed application**: Added MongoDB as a fully managed database with replica sets, persistent storage, and unified user/database configuration ([**@lexfrei**](https://github.com/lexfrei) in #1822; [**@kvaps**](https://github.com/kvaps) in #1923).
|
||||
* **[qdrant] Add Qdrant vector database**: Added Qdrant as a high-performance vector database for AI/ML workloads with API key authentication and optional LoadBalancer access ([**@lexfrei**](https://github.com/lexfrei) in #1987).
|
||||
* **[harbor] Add managed Harbor container registry**: Added Harbor v2.14.2 as a managed tenant-level container registry with CloudNativePG, Redis operator, COSI BucketClaim storage, and Trivy scanner ([**@lexfrei**](https://github.com/lexfrei) in #2058).
|
||||
* **[nats] Add monitoring**: Added Grafana dashboards for NATS JetStream and server metrics, Prometheus monitoring with TLS support ([**@klinch0**](https://github.com/klinch0) in #1381).
|
||||
* **[mariadb] Rename mysql application to mariadb**: Renamed MySQL application to MariaDB with automatic migration (migration 27) for all existing resources ([**@kvaps**](https://github.com/kvaps) in #2026).
|
||||
* **[ferretdb] Remove FerretDB application**: Removed FerretDB, superseded by native MongoDB support ([**@kvaps**](https://github.com/kvaps) in #2028).
|
||||
|
||||
### Kubernetes and System Components
|
||||
|
||||
* **[kubernetes] Update supported Kubernetes versions to v1.30–v1.35**: Updated the tenant Kubernetes version matrix, with v1.35 as the new default. Kamaji updated to edge-26.2.4 and CAPI Kamaji provider to v0.16.0 ([**@lexfrei**](https://github.com/lexfrei) in #2073).
|
||||
* **[kubernetes] Auto-enable Gateway API support in cert-manager**: Added automatic Gateway API support in cert-manager for tenant clusters ([**@kvaps**](https://github.com/kvaps) in #1997).
|
||||
* **[kubernetes] Use ingress-nginx nodeport service**: Changed tenant Kubernetes clusters to use ingress-nginx NodePort service for improved compatibility ([**@sircthulhu**](https://github.com/sircthulhu) in #1948).
|
||||
* **[system] Add cluster-autoscaler for Hetzner and Azure**: Added cluster-autoscaler system package for automatically scaling management cluster nodes on Hetzner and Azure ([**@kvaps**](https://github.com/kvaps) in #1964).
|
||||
* **[cluster-autoscaler] Enable enforce-node-group-min-size by default**: Ensures node groups are always scaled up to their configured minimum size ([**@kvaps**](https://github.com/kvaps) in #2050).
|
||||
* **[system] Add clustersecret-operator package**: Added clustersecret-operator for managing secrets across multiple namespaces ([**@sircthulhu**](https://github.com/sircthulhu) in #2025).
|
||||
|
||||
### Monitoring
|
||||
|
||||
* **[monitoring] Enable monitoring for core components**: Enhanced monitoring capabilities with dashboards and metrics for core Cozystack components ([**@IvanHunters**](https://github.com/IvanHunters) in #1937).
|
||||
* **[monitoring] Add SLACK_SEVERITY_FILTER and VMAgent for tenant monitoring**: Added SLACK_SEVERITY_FILTER for Slack alert filtering and VMAgent for tenant namespace metrics scraping ([**@IvanHunters**](https://github.com/IvanHunters) in #1712).
|
||||
* **[monitoring-agents] Fix FQDN resolution for tenant workload clusters**: Fixed monitoring agents in tenant clusters to use full DNS names with cluster domain suffix ([**@IvanHunters**](https://github.com/IvanHunters) in #2075; [**@kvaps**](https://github.com/kvaps) in #2086).
|
||||
|
||||
### Storage
|
||||
|
||||
* **[linstor] Move CRDs to dedicated piraeus-operator-crds chart**: Moved LINSTOR CRDs to a dedicated chart, ensuring reliable installation of all CRDs including `linstorsatellites.io` ([**@kvaps**](https://github.com/kvaps) in #2036; [**@IvanHunters**](https://github.com/IvanHunters) in #1991).
|
||||
* **[seaweedfs] Increase certificate duration to 10 years**: Increased SeaweedFS certificate validity to 10 years to reduce rotation overhead ([**@IvanHunters**](https://github.com/IvanHunters) in #1986).
|
||||
|
||||
## Improvements
|
||||
|
||||
* **[dashboard] Upgrade dashboard to version 1.4.0**: Updated Cozystack dashboard to v1.4.0 with new features and improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #2051).
|
||||
* **[dashboard] Hide Ingresses/Services/Secrets tabs when no selectors defined**: Tabs are now conditionally shown based on whether the ApplicationDefinition has resource selectors configured, reducing UI clutter ([**@kvaps**](https://github.com/kvaps) in #2087).
|
||||
* **[dashboard] Add startupProbe to prevent container restarts on slow hardware**: Added startup probe to dashboard pods to prevent unnecessary restarts ([**@kvaps**](https://github.com/kvaps) in #1996).
|
||||
* **[keycloak] Allow custom Ingress hostname via values**: Added `ingress.host` field to cozy-keycloak chart values for overriding the default `keycloak.<root-host>` hostname ([**@sircthulhu**](https://github.com/sircthulhu) in #2101).
|
||||
* **[branding] Separate values for Keycloak**: Separated Keycloak branding values for better customization capabilities ([**@nbykov0**](https://github.com/nbykov0) in #1947).
|
||||
* **[rbac] Use hierarchical naming scheme**: Refactored RBAC to use hierarchical naming for cluster roles and role bindings ([**@lllamnyp**](https://github.com/lllamnyp) in #2019).
|
||||
* **[tenant,rbac] Use shared clusterroles**: Refactored tenant RBAC to use shared ClusterRoles for improved consistency ([**@lllamnyp**](https://github.com/lllamnyp) in #1999).
|
||||
* **[kubernetes] Increase default apiServer resourcesPreset to large**: Increased kube-apiserver resource preset to `large` for more reliable operation under higher workloads ([**@kvaps**](https://github.com/kvaps) in #1875).
|
||||
* **[kubernetes] Increase kube-apiserver startup probe threshold**: Increased startup probe threshold to allow more time for API server readiness ([**@kvaps**](https://github.com/kvaps) in #1876).
|
||||
* **[etcd] Increase probe thresholds for better recovery**: Increased etcd probe thresholds to improve cluster resilience during temporary slowdowns ([**@kvaps**](https://github.com/kvaps) in #1874).
|
||||
* **[etcd-operator] Add vertical-pod-autoscaler dependency**: Added VPA as a dependency to etcd-operator for proper resource scaling ([**@sircthulhu**](https://github.com/sircthulhu) in #2047).
|
||||
* **[cilium] Change cilium-operator replicas to 1**: Reduced Cilium operator replicas to decrease resource consumption in smaller deployments ([**@IvanHunters**](https://github.com/IvanHunters) in #1784).
|
||||
* **[keycloak-configure,dashboard] Skip TLS certificate verification by default**: Made SSL certificate verification configurable, with verification disabled (insecure mode) by default to simplify local development ([**@IvanHunters**](https://github.com/IvanHunters) in #2005).
|
||||
* **[platform] Split telemetry between operator and controller**: Separated telemetry collection for better metrics isolation ([**@kvaps**](https://github.com/kvaps) in #1869).
|
||||
* **[system] Add resource requests and limits to etcd-defrag**: Added resource requests and limits to etcd-defrag job to prevent resource contention ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1785, #1786).
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[dashboard] Fix sidebar visibility on cluster-level pages**: Fixed broken URLs with double `//` on cluster-level pages by hiding namespace-scoped sidebar items when no tenant is selected ([**@sircthulhu**](https://github.com/sircthulhu) in #2106).
|
||||
* **[platform] Fix upgrade issues in migrations, etcd timeout, and migration script**: Fixed multiple upgrade failures discovered during v0.41.1 → v1.0 upgrade testing, including fixes to migrations 26–29, RFC3339-formatted reconcile annotations, and an etcd HelmRelease timeout extended to 30m ([**@kvaps**](https://github.com/kvaps) in #2096).
|
||||
* **[platform] Fix orphaned -rd HelmReleases after application renames**: Migrations 28 and 29 were updated to remove orphaned `-rd` HelmReleases in `cozy-system` left behind by the `ferretdb→mongodb`, `mysql→mariadb`, and `virtual-machine→vm-disk+vm-instance` renames, with migration 33 added as a safety net ([**@kvaps**](https://github.com/kvaps) in #2102).
|
||||
* **[platform] Adopt tenant-root into cozystack-basics during migration**: Added migration 31 to adopt existing `tenant-root` Namespace and HelmRelease into `cozystack-basics` for a safe v0.41.x → v1.0 upgrade path ([**@kvaps**](https://github.com/kvaps) in #2065).
|
||||
* **[platform] Preserve tenant-root HelmRelease during migration**: Fixed data-loss risk during migration where `tenant-root` HelmRelease could be deleted ([**@sircthulhu**](https://github.com/sircthulhu) in #2063).
|
||||
* **[platform] Fix cozystack-values secret race condition**: Fixed race condition in cozystack-values secret creation that could cause initialization failures ([**@lllamnyp**](https://github.com/lllamnyp) in #2024).
|
||||
* **[cozystack-basics] Preserve existing HelmRelease values during reconciliations**: Fixed data-loss bug where changes to `tenant-root` HelmRelease were dropped on the next reconciliation ([**@sircthulhu**](https://github.com/sircthulhu) in #2068).
|
||||
* **[cozystack-basics] Deny resourcequotas deletion for tenant admin**: Fixed `cozy:tenant:admin:base` ClusterRole to explicitly deny deletion of ResourceQuota objects ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2076).
|
||||
* **[dashboard] Fix legacy templating and cluster identifier in sidebar links**: Standardized cluster identifier across dashboard menu links resolving broken link targets for Backups and External IPs ([**@androndo**](https://github.com/androndo) in #2093).
|
||||
* **[dashboard] Fix backupjobs creation form and sidebar backup category identifier**: Fixed backup job creation form fields and fixed sidebar backup category identifier ([**@androndo**](https://github.com/androndo) in #2103).
|
||||
* **[kubevirt] Update KubeVirt to v1.6.4 and CDI to v1.64.0, fix VM pod initialization**: Updated KubeVirt and CDI and disabled serial console logging globally to fix the `guest-console-log` init container blocking virt-launcher pods ([**@nbykov0**](https://github.com/nbykov0) in #1833; [**@kvaps**](https://github.com/kvaps)).
|
||||
* **[linstor] Fix DRBD+LUKS+STORAGE resource creation failure**: Applied upstream fix for all newly created encrypted volumes failing due to missing `setExists(true)` call in `LuksLayer` ([**@kvaps**](https://github.com/kvaps) in #2072).
|
||||
* **[platform] Clean up Helm secrets for removed releases**: Added cleanup logic to migration 23 to remove orphaned Helm secrets from removed `-rd` releases ([**@kvaps**](https://github.com/kvaps) in #2035).
|
||||
* **[monitoring] Fix YAML parse error in vmagent template**: Fixed YAML parsing error in monitoring-agents vmagent template ([**@kvaps**](https://github.com/kvaps) in #2037).
|
||||
* **[monitoring] Remove cozystack-controller dependency**: Fixed monitoring package to remove unnecessary cozystack-controller dependency ([**@IvanHunters**](https://github.com/IvanHunters) in #1990).
|
||||
* **[monitoring] Remove duplicate dashboards.list**: Fixed duplicate dashboards.list configuration in extra/monitoring package ([**@IvanHunters**](https://github.com/IvanHunters) in #2016).
|
||||
* **[linstor] Update piraeus-server patches with critical fixes**: Backported critical patches fixing edge cases in device management and DRBD resource handling ([**@kvaps**](https://github.com/kvaps) in #1850).
|
||||
* **[apiserver] Fix Watch resourceVersion and bookmark handling**: Fixed Watch API handling of resourceVersion and bookmarks for proper event streaming ([**@kvaps**](https://github.com/kvaps) in #1860).
|
||||
* **[bootbox] Auto-create bootbox-application as dependency**: Fixed bootbox package to automatically create required bootbox-application dependency ([**@kvaps**](https://github.com/kvaps) in #1974).
|
||||
* **[postgres-operator] Correct PromQL syntax in CNPGClusterOffline alert**: Fixed incorrect PromQL syntax in the CNPGClusterOffline Prometheus alert ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1981).
|
||||
* **[coredns] Fix serviceaccount to match kubernetes bootstrap RBAC**: Fixed CoreDNS service account to correctly match Kubernetes bootstrap RBAC requirements ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1958).
|
||||
* **[dashboard] Verify JWT token**: Added JWT token verification to dashboard for improved security ([**@lllamnyp**](https://github.com/lllamnyp) in #1980).
|
||||
* **[codegen] Fix missing gen_client in update-codegen.sh**: Fixed build error in `pkg/generated/applyconfiguration/utils.go` by including `gen_client` in the codegen script ([**@lexfrei**](https://github.com/lexfrei) in #2061).
|
||||
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name ensuring proper alert triggering ([**@lexfrei**](https://github.com/lexfrei) in #1770, #1775).
|
||||
|
||||
## Security
|
||||
|
||||
* **[dashboard] Verify JWT token**: Added JWT token verification to the dashboard for improved authentication security ([**@lllamnyp**](https://github.com/lllamnyp) in #1980).
|
||||
|
||||
## Dependencies
|
||||
|
||||
* **[cilium] Update to v1.18.6**: Updated Cilium CNI to v1.18.6 with security fixes and performance improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #1868).
|
||||
* **[kube-ovn] Update to v1.15.3**: Updated Kube-OVN CNI to v1.15.3 with performance improvements and bug fixes ([**@kvaps**](https://github.com/kvaps) in #2022).
|
||||
* **[kilo] Update to v0.8.0**: Updated Kilo WireGuard mesh to v0.8.0 with performance improvements and new compatibility features ([**@kvaps**](https://github.com/kvaps) in #2053).
|
||||
* **Update Talos Linux to v1.12.1**: Updated Talos Linux to v1.12.1 with latest features and security patches ([**@kvaps**](https://github.com/kvaps) in #1877).
|
||||
|
||||
## System Configuration
|
||||
|
||||
* **[vpc] Migrate subnets definition from map to array format**: Migrated VPC subnets from `map[string]Subnet` to `[]Subnet` with explicit `name` field, with automatic migration via migration 30 ([**@kvaps**](https://github.com/kvaps) in #2052).
|
||||
* **[migrations] Add migrations 23-33 for v1.0 upgrade path**: Added 11 incremental migrations handling CRD ownership, resource renaming, secret cleanup, Helm adoption, and configuration conversion for the v0.41.x → v1.0.0 upgrade path ([**@kvaps**](https://github.com/kvaps) in #1975, #2035, #2036, #2040, #2026, #2065, #2052, #2102).
|
||||
* **[tenant] Run cleanup job from system namespace**: Moved tenant cleanup job to system namespace for improved security and resource isolation ([**@lllamnyp**](https://github.com/lllamnyp) in #1774, #1777).
|
||||
|
||||
## Development, Testing, and CI/CD
|
||||
|
||||
* **[ci] Use GitHub Copilot CLI for changelog generation**: Automated changelog generation using GitHub Copilot CLI ([**@androndo**](https://github.com/androndo) in #1753).
|
||||
* **[ci] Choose runner conditional on label**: Added conditional runner selection in CI based on PR labels ([**@lllamnyp**](https://github.com/lllamnyp) in #1998).
|
||||
* **[e2e] Use helm install instead of kubectl apply for cozystack installation**: Replaced static YAML apply flow with direct `helm upgrade --install` of the installer chart in E2E tests ([**@lexfrei**](https://github.com/lexfrei) in #2060).
|
||||
* **[e2e] Make kubernetes test retries effective by cleaning up stale resources**: Fixed E2E test retries by adding pre-creation cleanup and increasing deployment wait timeout to 300s ([**@lexfrei**](https://github.com/lexfrei) in #2062).
|
||||
* **[e2e] Increase HelmRelease readiness timeout for kubernetes test**: Increased HelmRelease readiness timeout to prevent false failures on slower hardware ([**@lexfrei**](https://github.com/lexfrei) in #2033).
|
||||
* **[ci] Improve cozyreport functionality**: Enhanced cozyreport tool with improved reporting for CI/CD pipelines ([**@lllamnyp**](https://github.com/lllamnyp) in #2032).
|
||||
* **feat(cozypkg): add cross-platform build targets with version injection**: Added cross-platform build targets for cozypkg/cozyhr tool for linux/amd64, linux/arm64, darwin/amd64, darwin/arm64 ([**@kvaps**](https://github.com/kvaps) in #1862).
|
||||
* **refactor: move scripts to hack directory**: Reorganized scripts to the standard `hack/` location ([**@kvaps**](https://github.com/kvaps) in #1863).
|
||||
* **Update CODEOWNERS**: Updated CODEOWNERS to include new maintainers ([**@lllamnyp**](https://github.com/lllamnyp) in #1972; [**@IvanHunters**](https://github.com/IvanHunters) in #2015).
|
||||
* **[talm] Skip config loading for completion subcommands**: Fixed talm CLI to skip config loading for shell completion commands ([**@kitsunoff**](https://github.com/kitsunoff) in cozystack/talm#109).
|
||||
* **[talm] Fix metadata.id type casting in physical_links_info**: Fixed Prometheus query to properly cast metadata.id to string for regexMatch operations ([**@kvaps**](https://github.com/kvaps) in cozystack/talm#110).
|
||||
|
||||
## Documentation
|
||||
|
||||
* **[website] Add documentation versioning**: Implemented comprehensive documentation versioning with separate v0 and v1 documentation trees and a version selector in the UI ([**@IvanStukov**](https://github.com/IvanStukov) in cozystack/website#415).
|
||||
* **[website] Describe upgrade to v1.0**: Added detailed upgrade instructions for migrating from v0.x to v1.0 ([**@nbykov0**](https://github.com/nbykov0) in cozystack/website@21bbe84).
|
||||
* **[website] Migrate ConfigMap references to Platform Package in v1 docs**: Updated entire v1 documentation to replace legacy ConfigMap-based configuration with the new Platform Package API ([**@sircthulhu**](https://github.com/sircthulhu) in cozystack/website#426).
|
||||
* **[website] Add generic Kubernetes deployment guide for v1**: Added installation guide for deploying Cozystack on any generic Kubernetes cluster ([**@lexfrei**](https://github.com/lexfrei) in cozystack/website#408).
|
||||
* **[website] Describe operator-based and HelmRelease-based package patterns**: Added development documentation explaining operator-based and HelmRelease-based package patterns ([**@kvaps**](https://github.com/kvaps) in cozystack/website#413).
|
||||
* **[website] Add Helm chart development principles guide**: Added developer guide documenting Cozystack's four core Helm chart principles ([**@kvaps**](https://github.com/kvaps) in cozystack/website#418).
|
||||
* **[website] Add network architecture overview**: Added comprehensive network architecture documentation covering the multi-layered networking stack with Mermaid diagrams ([**@IvanHunters**](https://github.com/IvanHunters) in cozystack/website#422).
|
||||
* **[website] Add LINSTOR disk preparation guide**: Added comprehensive documentation for preparing disks for LINSTOR storage ([**@IvanHunters**](https://github.com/IvanHunters) in cozystack/website#411).
|
||||
* **[website] Add Proxmox VM migration guide**: Added detailed guide for migrating virtual machines from Proxmox to Cozystack ([**@IvanHunters**](https://github.com/IvanHunters) in cozystack/website#410).
|
||||
* **[website] Add cluster autoscaler documentation**: Added documentation for Hetzner setup with Talos, vSwitch, and Kilo mesh integration ([**@kvaps**](https://github.com/kvaps) in #1964).
|
||||
* **[website] Improve Azure autoscaling troubleshooting guide**: Enhanced Azure autoscaling documentation with serial console instructions and `az vmss update --custom-data` guidance ([**@kvaps**](https://github.com/kvaps) in cozystack/website#424).
|
||||
* **[website] Update multi-location documentation for cilium-kilo variant**: Updated multi-location networking docs to reflect the integrated `cilium-kilo` variant selection ([**@kvaps**](https://github.com/kvaps) in cozystack/website@02d63f0).
|
||||
* **[website] Update documentation to use jsonpatch for service exposure**: Improved `kubectl patch` commands to use JSON Patch `add` operations ([**@sircthulhu**](https://github.com/sircthulhu) in cozystack/website#427).
|
||||
* **[website] Update certificates section in Platform Package documentation**: Updated certificate configuration docs to reflect new `solver` and `issuerName` fields ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in cozystack/website#429).
|
||||
* **[website] Add tenant Kubernetes cluster log querying guide**: Added documentation for querying logs from tenant clusters in Grafana using VictoriaLogs labels ([**@IvanHunters**](https://github.com/IvanHunters) in cozystack/website#430).
|
||||
* **[website] Replace non-idempotent commands with idempotent alternatives**: Updated `helm install` to `helm upgrade --install` and `kubectl create` to `kubectl apply` across all installation guides ([**@lexfrei**](https://github.com/lexfrei) in cozystack/website#431).
|
||||
* **[website] Fix broken documentation links with .md suffix**: Fixed incorrect internal links across virtualization guides for v0 and v1 documentation ([**@cheese**](https://github.com/cheese) in cozystack/website#432).
|
||||
* **[website] Refactor resource planning documentation**: Improved resource planning guide with clearer structure and more comprehensive coverage ([**@IvanStukov**](https://github.com/IvanStukov) in cozystack/website#423).
|
||||
* **[website] Add ServiceAccount API access documentation and update FAQ**: Added documentation for ServiceAccount API access token configuration and updated FAQ ([**@IvanStukov**](https://github.com/IvanStukov) in cozystack/website#421).
|
||||
* **[website] Update networking-mesh allowed-location-ips example**: Replaced provider-specific CLI with standard `kubectl` commands in multi-location networking guide ([**@kvaps**](https://github.com/kvaps) in cozystack/website#425).
|
||||
* **[website] docs(storage): simplify NFS driver setup instructions**: Simplified NFS driver setup documentation ([**@kvaps**](https://github.com/kvaps) in cozystack/website#399).
|
||||
* **[website] Add Hetzner RobotLB documentation**: Added documentation for configuring public IP with Hetzner RobotLB ([**@kvaps**](https://github.com/kvaps) in cozystack/website#394).
|
||||
* **[website] Add documentation for creating and managing cloned VMs**: Added comprehensive guide for VM cloning operations ([**@sircthulhu**](https://github.com/sircthulhu) in cozystack/website#401).
|
||||
* **[website] Update Talos installation docs for Hetzner and Servers.com**: Updated installation documentation for Hetzner and Servers.com environments ([**@kvaps**](https://github.com/kvaps) in cozystack/website#395).
|
||||
* **[website] Add Hidora organization support details**: Added Hidora to the support page ([**@matthieu-robin**](https://github.com/matthieu-robin) in cozystack/website#397, cozystack/website#398).
|
||||
* **[website] Check quotas before an upgrade**: Added troubleshooting documentation for checking resource quotas before upgrades ([**@nbykov0**](https://github.com/nbykov0) in cozystack/website#405).
|
||||
* **[website] Update support documentation**: Updated support documentation with current contact information ([**@xrmtech-isk**](https://github.com/xrmtech-isk) in cozystack/website#420).
|
||||
* **[website] Correct typo in kubeconfig reference in Kubernetes installation guide**: Fixed documentation typo in kubeconfig reference ([**@shkarface**](https://github.com/shkarface) in cozystack/website#414).
|
||||
|
||||
## Breaking Changes & Upgrade Notes
|
||||
|
||||
* **[api] CozystackResourceDefinition renamed to ApplicationDefinition**: The `CozystackResourceDefinition` CRD has been renamed to `ApplicationDefinition`. Migration 24 handles the transition automatically during upgrade ([**@kvaps**](https://github.com/kvaps) in #1864).
|
||||
|
||||
* **[platform] Certificate issuer configuration parameters renamed**: The `publishing.certificates.issuerType` field is renamed to `publishing.certificates.solver`, and the value `cloudflare` is renamed to `dns01`. A new `publishing.certificates.issuerName` field (default: `letsencrypt-prod`) is added. Migration 32 automatically converts existing configurations — no manual action required ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2077).
|
||||
|
||||
* **[vpc] VPC subnets definition migrated from map to array format**: VPC subnets are now defined as `[]Subnet` with an explicit `name` field instead of `map[string]Subnet`. Migration 30 handles the conversion automatically ([**@kvaps**](https://github.com/kvaps) in #2052).
|
||||
|
||||
* **[vm] virtual-machine application replaced by vm-disk and vm-instance**: The legacy `virtual-machine` application has been fully replaced. Migration 28 automatically converts existing VMs to the new architecture ([**@kvaps**](https://github.com/kvaps) in #2040).
|
||||
|
||||
* **[mysql] mysql application renamed to mariadb**: Existing MySQL deployments are automatically renamed to MariaDB via migration 27 ([**@kvaps**](https://github.com/kvaps) in #2026).
|
||||
|
||||
### Upgrade Guide
|
||||
|
||||
To upgrade from v0.41.x to v1.0.0:
|
||||
1. **Backup your cluster** before upgrading.
|
||||
2. Run the provided migration script: `hack/migrate-to-version-1.0.sh`.
|
||||
3. The 33 incremental migration steps will automatically handle all resource renaming, configuration conversion, CRD adoption, and secret cleanup.
|
||||
4. Refer to the [upgrade documentation](https://cozystack.io/docs/v1/upgrade) for detailed instructions and troubleshooting.
|
||||
|
||||
## Contributors
|
||||
|
||||
We'd like to thank all contributors who made this release possible:
|
||||
|
||||
* [**@androndo**](https://github.com/androndo)
|
||||
* [**@cheese**](https://github.com/cheese)
|
||||
* [**@IvanHunters**](https://github.com/IvanHunters)
|
||||
* [**@IvanStukov**](https://github.com/IvanStukov)
|
||||
* [**@kitsunoff**](https://github.com/kitsunoff)
|
||||
* [**@klinch0**](https://github.com/klinch0)
|
||||
* [**@kvaps**](https://github.com/kvaps)
|
||||
* [**@lexfrei**](https://github.com/lexfrei)
|
||||
* [**@lllamnyp**](https://github.com/lllamnyp)
|
||||
* [**@matthieu-robin**](https://github.com/matthieu-robin)
|
||||
* [**@mattia-eleuteri**](https://github.com/mattia-eleuteri)
|
||||
* [**@myasnikovdaniil**](https://github.com/myasnikovdaniil)
|
||||
* [**@nbykov0**](https://github.com/nbykov0)
|
||||
* [**@shkarface**](https://github.com/shkarface)
|
||||
* [**@sircthulhu**](https://github.com/sircthulhu)
|
||||
* [**@xrmtech-isk**](https://github.com/xrmtech-isk)
|
||||
|
||||
### New Contributors
|
||||
|
||||
We're excited to welcome our first-time contributors:
|
||||
|
||||
* [**@cheese**](https://github.com/cheese) - First contribution!
|
||||
* [**@IvanStukov**](https://github.com/IvanStukov) - First contribution!
|
||||
* [**@kitsunoff**](https://github.com/kitsunoff) - First contribution!
|
||||
* [**@shkarface**](https://github.com/shkarface) - First contribution!
|
||||
* [**@xrmtech-isk**](https://github.com/xrmtech-isk) - First contribution!
|
||||
|
||||
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.41.0...v1.0.0
|
||||
@@ -1,21 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.1
|
||||
-->
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[platform] Prevent cozystack-version ConfigMap from deletion**: Added resource protection to prevent the `cozystack-version` ConfigMap from being accidentally deleted, improving platform stability and reliability ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2112, #2114).
|
||||
|
||||
* **[installer] Add keep annotation to Namespace and update migration script**: Added `helm.sh/resource-policy: keep` annotation to the `cozy-system` Namespace in the installer Helm chart to prevent Helm from deleting the namespace (and all HelmReleases within it) when the installer release is removed. The v1.0 migration script is also updated to annotate the `cozy-system` namespace and `cozystack-version` ConfigMap with this policy before migration ([**@kvaps**](https://github.com/kvaps) in #2122, #2123).
|
||||
|
||||
* **[dashboard] Add FlowSchema to exempt BFF from API throttling**: Added a `cozy-dashboard-exempt` FlowSchema to exempt the dashboard Back-End-for-Frontend (BFF) service account from Kubernetes API Priority and Fairness throttling. Previously, the BFF fell under the `workload-low` priority level, causing 429 (Too Many Requests) errors under load, resulting in dashboard unresponsiveness ([**@kvaps**](https://github.com/kvaps) in #2121, #2124).
|
||||
|
||||
## Documentation
|
||||
|
||||
* **[website] Replace bundles documentation with variants**: Renamed the "Bundles" documentation section to "Variants" to match current Cozystack terminology. Removed deprecated variants (`iaas-full`, `distro-full`, `distro-hosted`) and added new variants: `default` (PackageSources only, for manual package management via cozypkg) and `isp-full-generic` (full PaaS/IaaS on k3s, kubeadm, or RKE2). Updated all cross-references throughout the documentation ([**@kvaps**](https://github.com/kvaps) in cozystack/website#433).
|
||||
|
||||
* **[website] Add step to protect namespace before upgrading**: Updated the cluster upgrade guide and v0.41→v1.0 migration guide with a required step to annotate the `cozy-system` namespace and `cozystack-version` ConfigMap with `helm.sh/resource-policy=keep` before running `helm upgrade`, preventing accidental namespace deletion ([**@kvaps**](https://github.com/kvaps) in cozystack/website#435).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v1.0.0...v1.0.1
|
||||
@@ -1,19 +0,0 @@
|
||||
<!--
|
||||
https://github.com/cozystack/cozystack/releases/tag/v1.0.2
|
||||
-->
|
||||
|
||||
## Fixes
|
||||
|
||||
* **[platform] Suspend cozy-proxy if it conflicts with installer release during migration**: Added a check in the v0.41→v1.0 migration script to detect and automatically suspend the `cozy-proxy` HelmRelease when its `releaseName` is set to `cozystack`, which conflicts with the installer release and would cause `cozystack-operator` deletion during the upgrade ([**@kvaps**](https://github.com/kvaps) in #2128, #2130).
|
||||
|
||||
* **[platform] Fix off-by-one error in run-migrations script**: Fixed a bug in the migration runner where the first required migration was always skipped due to an off-by-one error in the migration range calculation, ensuring all upgrade steps execute correctly ([**@myasnikovdaniil**](https://github.com/myasnikovdaniil) in #2126, #2132).
|
||||
|
||||
* **[system] Fix Keycloak proxy configuration for v26.x**: Replaced the deprecated `KC_PROXY=edge` environment variable with `KC_PROXY_HEADERS=xforwarded` and `KC_HTTP_ENABLED=true` in the Keycloak StatefulSet template. `KC_PROXY` was removed in Keycloak 26.x, previously causing "Non-secure context detected" warnings and broken cookie handling when running behind a reverse proxy with TLS termination ([**@sircthulhu**](https://github.com/sircthulhu) in #2125, #2134).
|
||||
|
||||
* **[dashboard] Allow clearing instanceType field and preserve newlines in secret copy**: Added `allowEmpty: true` to the `instanceType` field in the VMInstance form so users can explicitly clear it to use custom KubeVirt resources without a named instance type. Also fixed newline preservation when copying secrets with CMD+C ([**@sircthulhu**](https://github.com/sircthulhu) in #2135, #2137).
|
||||
|
||||
* **[dashboard] Restore stock-instance sidebars for namespace-level pages**: Restored `stock-instance-api-form`, `stock-instance-api-table`, `stock-instance-builtin-form`, and `stock-instance-builtin-table` sidebar resources that were inadvertently removed in #2106. Without these sidebars, namespace-level pages such as Backup Plans rendered as empty pages with no interactive content ([**@sircthulhu**](https://github.com/sircthulhu) in #2136, #2138).
|
||||
|
||||
---
|
||||
|
||||
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v1.0.1...v1.0.2
|
||||
@@ -10,11 +10,7 @@ PATTERN=${2:-*}
|
||||
LINE='----------------------------------------------------------------'
|
||||
|
||||
cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
|
||||
if [ -t 1 ]; then
|
||||
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
|
||||
else
|
||||
MAXW=0 # no truncation when not a tty (e.g. CI)
|
||||
fi
|
||||
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
|
||||
BEGIN=$(date +%s)
|
||||
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }
|
||||
|
||||
@@ -49,7 +45,7 @@ run_one() {
|
||||
*) out=$line ;;
|
||||
esac
|
||||
now=$(( $(date +%s) - START ))
|
||||
[ "$MAXW" -gt 0 ] && [ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
|
||||
[ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
|
||||
printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
|
||||
done
|
||||
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "Create OpenBAO (standalone)" {
|
||||
name='test'
|
||||
kubectl apply -f- <<EOF
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: OpenBAO
|
||||
metadata:
|
||||
name: $name
|
||||
namespace: tenant-test
|
||||
spec:
|
||||
replicas: 1
|
||||
size: 10Gi
|
||||
storageClass: ""
|
||||
resourcesPreset: "small"
|
||||
resources: {}
|
||||
external: false
|
||||
ui: true
|
||||
EOF
|
||||
sleep 5
|
||||
kubectl -n tenant-test wait hr openbao-$name --timeout=60s --for=condition=ready
|
||||
kubectl -n tenant-test wait hr openbao-$name-system --timeout=120s --for=condition=ready
|
||||
|
||||
# Wait for container to be started (pod Running does not guarantee container is ready for exec on slow CI)
|
||||
if ! timeout 120 sh -ec "until kubectl -n tenant-test get pod openbao-$name-0 --output jsonpath='{.status.containerStatuses[0].started}' 2>/dev/null | grep -q true; do sleep 5; done"; then
|
||||
echo "=== DEBUG: Container did not start in time ===" >&2
|
||||
kubectl -n tenant-test describe pod openbao-$name-0 >&2 || true
|
||||
kubectl -n tenant-test logs openbao-$name-0 --previous >&2 || true
|
||||
kubectl -n tenant-test logs openbao-$name-0 >&2 || true
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Wait for OpenBAO API to accept connections
|
||||
# bao status exit codes: 0 = unsealed, 1 = error/not ready, 2 = sealed but responsive
|
||||
if ! timeout 60 sh -ec "until kubectl -n tenant-test exec openbao-$name-0 -- bao status >/dev/null 2>&1; rc=\$?; test \$rc -eq 0 -o \$rc -eq 2; do sleep 3; done"; then
|
||||
echo "=== DEBUG: OpenBAO API did not become responsive ===" >&2
|
||||
kubectl -n tenant-test describe pod openbao-$name-0 >&2 || true
|
||||
kubectl -n tenant-test logs openbao-$name-0 --previous >&2 || true
|
||||
kubectl -n tenant-test logs openbao-$name-0 >&2 || true
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Initialize OpenBAO (single key share for testing simplicity)
|
||||
init_output=$(kubectl -n tenant-test exec openbao-$name-0 -- bao operator init -key-shares=1 -key-threshold=1 -format=json)
|
||||
unseal_key=$(echo "$init_output" | jq -r '.unseal_keys_b64[0]')
|
||||
if [ -z "$unseal_key" ] || [ "$unseal_key" = "null" ]; then
|
||||
echo "Failed to extract unseal key. Init output: $init_output" >&2
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Unseal OpenBAO
|
||||
kubectl -n tenant-test exec openbao-$name-0 -- bao operator unseal "$unseal_key"
|
||||
|
||||
# Now wait for pod to become ready (readiness probe checks seal status)
|
||||
kubectl -n tenant-test wait sts openbao-$name --timeout=90s --for=jsonpath='{.status.readyReplicas}'=1
|
||||
kubectl -n tenant-test wait pvc data-openbao-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
|
||||
kubectl -n tenant-test delete openbao.apps.cozystack.io $name
|
||||
kubectl -n tenant-test delete pvc data-openbao-$name-0 --ignore-not-found
|
||||
}
|
||||
@@ -102,19 +102,15 @@ EOF
|
||||
done
|
||||
'
|
||||
# Verify the nodes are ready
|
||||
if ! kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait node --all --timeout=2m --for=condition=Ready; then
|
||||
# Additional debug messages
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" describe nodes
|
||||
kubectl -n tenant-test get hr
|
||||
fi
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait node --all --timeout=2m --for=condition=Ready
|
||||
kubectl --kubeconfig "tenantkubeconfig-${test_name}" get nodes -o wide
|
||||
|
||||
# Verify the kubelet version matches what we expect
|
||||
versions=$(kubectl --kubeconfig "tenantkubeconfig-${test_name}" \
|
||||
get nodes -o jsonpath='{.items[*].status.nodeInfo.kubeletVersion}')
|
||||
|
||||
|
||||
node_ok=true
|
||||
|
||||
|
||||
for v in $versions; do
|
||||
case "$v" in
|
||||
"${k8s_version}" | "${k8s_version}".* | "${k8s_version}"-*)
|
||||
@@ -197,7 +193,7 @@ EOF
|
||||
|
||||
# Wait for pods readiness
|
||||
kubectl wait deployment --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" -n tenant-test --for=condition=Available --timeout=300s
|
||||
|
||||
|
||||
# Wait for LoadBalancer to be provisioned (IP or hostname)
|
||||
timeout 90 sh -ec "
|
||||
until kubectl get svc ${test_name}-backend --kubeconfig tenantkubeconfig-${test_name} -n tenant-test \
|
||||
|
||||
@@ -32,54 +32,6 @@ if ! kubectl get namespace "$NAMESPACE" &> /dev/null; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Step 0: Annotate critical resources to prevent Helm from deleting them
|
||||
echo "Step 0: Protect critical resources from Helm deletion"
|
||||
echo ""
|
||||
echo "The following resources will be annotated with helm.sh/resource-policy=keep"
|
||||
echo "to prevent Helm from deleting them when the installer release is removed:"
|
||||
echo " - Namespace: $NAMESPACE"
|
||||
echo " - ConfigMap: $NAMESPACE/cozystack-version"
|
||||
echo ""
|
||||
read -p "Do you want to annotate these resources? (y/N) " -n 1 -r
|
||||
echo ""
|
||||
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Annotating namespace $NAMESPACE..."
|
||||
kubectl annotate namespace "$NAMESPACE" helm.sh/resource-policy=keep --overwrite
|
||||
echo "Annotating ConfigMap cozystack-version..."
|
||||
kubectl annotate configmap -n "$NAMESPACE" cozystack-version helm.sh/resource-policy=keep --overwrite 2>/dev/null || echo " ConfigMap cozystack-version not found, skipping."
|
||||
echo ""
|
||||
echo "Resources annotated successfully."
|
||||
else
|
||||
echo "WARNING: Skipping annotation. If you remove the Helm installer release,"
|
||||
echo "the namespace and its contents may be deleted!"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Step 1: Check for cozy-proxy HelmRelease with conflicting releaseName
|
||||
# In v0.41.x, cozy-proxy was incorrectly configured with releaseName "cozystack",
|
||||
# which conflicts with the installer helm release name. If not suspended, cozy-proxy
|
||||
# HelmRelease will overwrite the installer release and delete cozystack-operator.
|
||||
COZY_PROXY_RELEASE_NAME=$(kubectl get hr -n "$NAMESPACE" cozy-proxy -o jsonpath='{.spec.releaseName}' 2>/dev/null || true)
|
||||
if [ "$COZY_PROXY_RELEASE_NAME" = "cozystack" ]; then
|
||||
echo "WARNING: HelmRelease cozy-proxy has releaseName 'cozystack', which conflicts"
|
||||
echo "with the installer release. It must be suspended before proceeding, otherwise"
|
||||
echo "it will overwrite the installer and delete cozystack-operator."
|
||||
echo ""
|
||||
read -p "Suspend HelmRelease cozy-proxy? (y/N) " -n 1 -r
|
||||
echo ""
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]; then
|
||||
kubectl -n "$NAMESPACE" patch hr cozy-proxy --type=merge --field-manager=flux-client-side-apply -p '{"spec":{"suspend":true}}'
|
||||
echo "HelmRelease cozy-proxy suspended."
|
||||
else
|
||||
echo "ERROR: Cannot proceed with conflicting cozy-proxy HelmRelease active."
|
||||
echo "Please suspend it manually:"
|
||||
echo " kubectl -n $NAMESPACE patch hr cozy-proxy --type=merge -p '{\"spec\":{\"suspend\":true}}'"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Read ConfigMap cozystack
|
||||
echo "Reading ConfigMap cozystack..."
|
||||
COZYSTACK_CM=$(kubectl get configmap -n "$NAMESPACE" cozystack -o json 2>/dev/null || echo "{}")
|
||||
@@ -100,10 +52,6 @@ OIDC_ENABLED=$(echo "$COZYSTACK_CM" | jq -r '.data["oidc-enabled"] // "false"')
|
||||
KEYCLOAK_REDIRECTS=$(echo "$COZYSTACK_CM" | jq -r '.data["extra-keycloak-redirect-uri-for-dashboard"] // ""' )
|
||||
TELEMETRY_ENABLED=$(echo "$COZYSTACK_CM" | jq -r '.data["telemetry-enabled"] // "true"')
|
||||
BUNDLE_NAME=$(echo "$COZYSTACK_CM" | jq -r '.data["bundle-name"] // "paas-full"')
|
||||
BUNDLE_DISABLE=$(echo "$COZYSTACK_CM" | jq -r '.data["bundle-disable"] // ""')
|
||||
BUNDLE_ENABLE=$(echo "$COZYSTACK_CM" | jq -r '.data["bundle-enable"] // ""')
|
||||
EXPOSE_INGRESS=$(echo "$COZYSTACK_CM" | jq -r '.data["expose-ingress"] // "tenant-root"')
|
||||
EXPOSE_SERVICES=$(echo "$COZYSTACK_CM" | jq -r '.data["expose-services"] // ""')
|
||||
|
||||
# Certificate issuer configuration (old undocumented field: clusterissuer)
|
||||
OLD_CLUSTER_ISSUER=$(echo "$COZYSTACK_CM" | jq -r '.data["clusterissuer"] // ""')
|
||||
@@ -151,31 +99,28 @@ else
|
||||
EXTERNAL_IPS=$(echo "$EXTERNAL_IPS" | sed 's/,/\n/g' | awk 'BEGIN{print}{print " - "$0}')
|
||||
fi
|
||||
|
||||
# Convert comma-separated lists to YAML arrays
|
||||
if [ -z "$BUNDLE_DISABLE" ]; then
|
||||
DISABLED_PACKAGES="[]"
|
||||
else
|
||||
DISABLED_PACKAGES=$(echo "$BUNDLE_DISABLE" | sed 's/,/\n/g' | awk 'BEGIN{print}{print " - cozystack."$0}')
|
||||
fi
|
||||
|
||||
if [ -z "$BUNDLE_ENABLE" ]; then
|
||||
ENABLED_PACKAGES="[]"
|
||||
else
|
||||
ENABLED_PACKAGES=$(echo "$BUNDLE_ENABLE" | sed 's/,/\n/g' | awk 'BEGIN{print}{print " - cozystack."$0}')
|
||||
fi
|
||||
|
||||
if [ -z "$EXPOSE_SERVICES" ]; then
|
||||
EXPOSED_SERVICES_YAML="[]"
|
||||
else
|
||||
EXPOSED_SERVICES_YAML=$(echo "$EXPOSE_SERVICES" | sed 's/,/\n/g' | awk 'BEGIN{print}{print " - "$0}')
|
||||
fi
|
||||
# Determine bundle type
|
||||
case "$BUNDLE_NAME" in
|
||||
paas-full|distro-full)
|
||||
SYSTEM_ENABLED="true"
|
||||
SYSTEM_TYPE="full"
|
||||
;;
|
||||
paas-hosted|distro-hosted)
|
||||
SYSTEM_ENABLED="false"
|
||||
SYSTEM_TYPE="hosted"
|
||||
;;
|
||||
*)
|
||||
SYSTEM_ENABLED="false"
|
||||
SYSTEM_TYPE="hosted"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Update bundle naming
|
||||
BUNDLE_NAME=$(echo "$BUNDLE_NAME" | sed 's/paas/isp/')
|
||||
|
||||
# Extract branding if available
|
||||
BRANDING=$(echo "$BRANDING_CM" | jq -r '.data // {} | to_entries[] | "\(.key): \"\(.value)\""')
|
||||
if [ -z "$BRANDING" ]; then
|
||||
if [ -z "$BRANDING" ]; then
|
||||
BRANDING="{}"
|
||||
else
|
||||
BRANDING=$(echo "$BRANDING" | awk 'BEGIN{print}{print " " $0}')
|
||||
@@ -196,6 +141,8 @@ echo " Root Host: $ROOT_HOST"
|
||||
echo " API Server Endpoint: $API_SERVER_ENDPOINT"
|
||||
echo " OIDC Enabled: $OIDC_ENABLED"
|
||||
echo " Bundle Name: $BUNDLE_NAME"
|
||||
echo " System Enabled: $SYSTEM_ENABLED"
|
||||
echo " System Type: $SYSTEM_TYPE"
|
||||
echo " Certificate Solver: ${SOLVER:-http01 (default)}"
|
||||
echo " Issuer Name: ${ISSUER_NAME:-letsencrypt-prod (default)}"
|
||||
echo ""
|
||||
@@ -213,8 +160,15 @@ spec:
|
||||
platform:
|
||||
values:
|
||||
bundles:
|
||||
disabledPackages: $DISABLED_PACKAGES
|
||||
enabledPackages: $ENABLED_PACKAGES
|
||||
system:
|
||||
enabled: $SYSTEM_ENABLED
|
||||
type: "$SYSTEM_TYPE"
|
||||
iaas:
|
||||
enabled: true
|
||||
paas:
|
||||
enabled: true
|
||||
naas:
|
||||
enabled: true
|
||||
networking:
|
||||
clusterDomain: "$CLUSTER_DOMAIN"
|
||||
podCIDR: "$POD_CIDR"
|
||||
@@ -223,8 +177,6 @@ spec:
|
||||
joinCIDR: "$JOIN_CIDR"
|
||||
publishing:
|
||||
host: "$ROOT_HOST"
|
||||
ingressName: "$EXPOSE_INGRESS"
|
||||
exposedServices: $EXPOSED_SERVICES_YAML
|
||||
apiServerEndpoint: "$API_SERVER_ENDPOINT"
|
||||
externalIPs: $EXTERNAL_IPS
|
||||
${CERTIFICATES_SECTION}
|
||||
|
||||
@@ -156,7 +156,7 @@ menuItems = append(menuItems, map[string]any{
|
||||
map[string]any{
|
||||
"key": "{plural}",
|
||||
"label": "{ResourceLabel}",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/api-table/{group}/{version}/{plural}",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/{group}/{version}/{plural}",
|
||||
},
|
||||
},
|
||||
}),
|
||||
@@ -174,7 +174,7 @@ menuItems = append(menuItems, map[string]any{
|
||||
|
||||
**Important Notes**:
|
||||
- The sidebar tag (`{lowercase-kind}-sidebar`) must match what the Factory uses
|
||||
- The link format: `/openapi-ui/{cluster}/{namespace}/api-table/{group}/{version}/{plural}`
|
||||
- The link format: `/openapi-ui/{clusterName}/{namespace}/api-table/{group}/{version}/{plural}`
|
||||
- All sidebars share the same `keysAndTags` and `menuItems`, so changes affect all sidebar instances
|
||||
|
||||
### Step 4: Verify Integration
|
||||
|
||||
@@ -195,7 +195,6 @@ func applyListInputOverrides(schema map[string]any, kind string, openAPIProps ma
|
||||
"valueUri": "/api/clusters/{cluster}/k8s/apis/instancetype.kubevirt.io/v1beta1/virtualmachineclusterinstancetypes",
|
||||
"keysToValue": []any{"metadata", "name"},
|
||||
"keysToLabel": []any{"metadata", "name"},
|
||||
"allowEmpty": true,
|
||||
},
|
||||
}
|
||||
if prop, _ := openAPIProps["instanceType"].(map[string]any); prop != nil {
|
||||
@@ -215,34 +214,6 @@ func applyListInputOverrides(schema map[string]any, kind string, openAPIProps ma
|
||||
"keysToLabel": []any{"metadata", "name"},
|
||||
},
|
||||
}
|
||||
|
||||
case "ClickHouse", "Harbor", "HTTPCache", "Kubernetes", "MariaDB", "MongoDB",
|
||||
"NATS", "OpenBAO", "Postgres", "Qdrant", "RabbitMQ", "Redis", "VMDisk":
|
||||
specProps := ensureSchemaPath(schema, "spec")
|
||||
specProps["storageClass"] = storageClassListInput()
|
||||
|
||||
case "FoundationDB":
|
||||
storageProps := ensureSchemaPath(schema, "spec", "storage")
|
||||
storageProps["storageClass"] = storageClassListInput()
|
||||
|
||||
case "Kafka":
|
||||
kafkaProps := ensureSchemaPath(schema, "spec", "kafka")
|
||||
kafkaProps["storageClass"] = storageClassListInput()
|
||||
zkProps := ensureSchemaPath(schema, "spec", "zookeeper")
|
||||
zkProps["storageClass"] = storageClassListInput()
|
||||
}
|
||||
}
|
||||
|
||||
// storageClassListInput returns a listInput field config for a storageClass dropdown
|
||||
// backed by the cluster's available StorageClasses.
|
||||
func storageClassListInput() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "listInput",
|
||||
"customProps": map[string]any{
|
||||
"valueUri": "/api/clusters/{cluster}/k8s/apis/storage.k8s.io/v1/storageclasses",
|
||||
"keysToValue": []any{"metadata", "name"},
|
||||
"keysToLabel": []any{"metadata", "name"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -202,10 +202,6 @@ func TestApplyListInputOverrides_VMInstance(t *testing.T) {
|
||||
t.Errorf("expected valueUri %s, got %v", expectedURI, customProps["valueUri"])
|
||||
}
|
||||
|
||||
if customProps["allowEmpty"] != true {
|
||||
t.Errorf("expected allowEmpty true, got %v", customProps["allowEmpty"])
|
||||
}
|
||||
|
||||
// Check disks[].name is a listInput
|
||||
disks, ok := specProps["disks"].(map[string]any)
|
||||
if !ok {
|
||||
@@ -236,72 +232,6 @@ func TestApplyListInputOverrides_VMInstance(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyListInputOverrides_StorageClassSimple(t *testing.T) {
|
||||
for _, kind := range []string{
|
||||
"ClickHouse", "Harbor", "HTTPCache", "Kubernetes", "MariaDB", "MongoDB",
|
||||
"NATS", "OpenBAO", "Postgres", "Qdrant", "RabbitMQ", "Redis", "VMDisk",
|
||||
} {
|
||||
t.Run(kind, func(t *testing.T) {
|
||||
schema := map[string]any{}
|
||||
applyListInputOverrides(schema, kind, map[string]any{})
|
||||
|
||||
specProps := schema["properties"].(map[string]any)["spec"].(map[string]any)["properties"].(map[string]any)
|
||||
sc, ok := specProps["storageClass"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatalf("storageClass not found in spec.properties for kind %s", kind)
|
||||
}
|
||||
assertStorageClassListInput(t, sc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyListInputOverrides_StorageClassFoundationDB(t *testing.T) {
|
||||
schema := map[string]any{}
|
||||
applyListInputOverrides(schema, "FoundationDB", map[string]any{})
|
||||
|
||||
storageProps := schema["properties"].(map[string]any)["spec"].(map[string]any)["properties"].(map[string]any)["storage"].(map[string]any)["properties"].(map[string]any)
|
||||
sc, ok := storageProps["storageClass"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("storageClass not found in spec.storage.properties")
|
||||
}
|
||||
assertStorageClassListInput(t, sc)
|
||||
}
|
||||
|
||||
func TestApplyListInputOverrides_StorageClassKafka(t *testing.T) {
|
||||
schema := map[string]any{}
|
||||
applyListInputOverrides(schema, "Kafka", map[string]any{})
|
||||
|
||||
specProps := schema["properties"].(map[string]any)["spec"].(map[string]any)["properties"].(map[string]any)
|
||||
|
||||
kafkaSC, ok := specProps["kafka"].(map[string]any)["properties"].(map[string]any)["storageClass"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("storageClass not found in spec.kafka.properties")
|
||||
}
|
||||
assertStorageClassListInput(t, kafkaSC)
|
||||
|
||||
zkSC, ok := specProps["zookeeper"].(map[string]any)["properties"].(map[string]any)["storageClass"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("storageClass not found in spec.zookeeper.properties")
|
||||
}
|
||||
assertStorageClassListInput(t, zkSC)
|
||||
}
|
||||
|
||||
// assertStorageClassListInput verifies that a field is a correctly configured storageClass listInput.
|
||||
func assertStorageClassListInput(t *testing.T, field map[string]any) {
|
||||
t.Helper()
|
||||
if field["type"] != "listInput" {
|
||||
t.Errorf("expected type listInput, got %v", field["type"])
|
||||
}
|
||||
customProps, ok := field["customProps"].(map[string]any)
|
||||
if !ok {
|
||||
t.Fatal("customProps not found")
|
||||
}
|
||||
expectedURI := "/api/clusters/{cluster}/k8s/apis/storage.k8s.io/v1/storageclasses"
|
||||
if customProps["valueUri"] != expectedURI {
|
||||
t.Errorf("expected valueUri %s, got %v", expectedURI, customProps["valueUri"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyListInputOverrides_UnknownKind(t *testing.T) {
|
||||
schema := map[string]any{}
|
||||
applyListInputOverrides(schema, "SomeOtherKind", map[string]any{})
|
||||
|
||||
@@ -582,14 +582,15 @@ type factoryFlags struct {
|
||||
Secrets bool
|
||||
}
|
||||
|
||||
// factoryFeatureFlags determines which tabs to show based on whether the
|
||||
// ApplicationDefinition has non-empty Include resource selectors.
|
||||
// Workloads tab is always shown.
|
||||
// factoryFeatureFlags tries several conventional locations so you can evolve the API
|
||||
// without breaking the controller. Defaults are false (hidden).
|
||||
func factoryFeatureFlags(crd *cozyv1alpha1.ApplicationDefinition) factoryFlags {
|
||||
return factoryFlags{
|
||||
Workloads: true,
|
||||
Ingresses: len(crd.Spec.Ingresses.Include) > 0,
|
||||
Services: len(crd.Spec.Services.Include) > 0,
|
||||
Secrets: len(crd.Spec.Secrets.Include) > 0,
|
||||
}
|
||||
var f factoryFlags
|
||||
|
||||
f.Workloads = true
|
||||
f.Ingresses = true
|
||||
f.Services = true
|
||||
f.Secrets = true
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
@@ -299,6 +299,10 @@ func (m *Manager) buildExpectedResourceSet(crds []cozyv1alpha1.ApplicationDefini
|
||||
|
||||
// Add other stock sidebars that are created for each CRD
|
||||
stockSidebars := []string{
|
||||
"stock-instance-api-form",
|
||||
"stock-instance-api-table",
|
||||
"stock-instance-builtin-form",
|
||||
"stock-instance-builtin-table",
|
||||
"stock-project-factory-marketplace",
|
||||
"stock-project-factory-workloadmonitor-details",
|
||||
"stock-project-api-form",
|
||||
@@ -307,10 +311,6 @@ func (m *Manager) buildExpectedResourceSet(crds []cozyv1alpha1.ApplicationDefini
|
||||
"stock-project-builtin-table",
|
||||
"stock-project-crd-form",
|
||||
"stock-project-crd-table",
|
||||
"stock-instance-api-form",
|
||||
"stock-instance-api-table",
|
||||
"stock-instance-builtin-form",
|
||||
"stock-instance-builtin-table",
|
||||
}
|
||||
for _, sidebarID := range stockSidebars {
|
||||
expected["Sidebar"][sidebarID] = true
|
||||
|
||||
@@ -17,7 +17,8 @@ import (
|
||||
|
||||
// ensureSidebar creates/updates multiple Sidebar resources that share the same menu:
|
||||
// - The "details" sidebar tied to the current kind (stock-project-factory-<kind>-details)
|
||||
// - The stock-project sidebars: api-form, api-table, builtin-form, builtin-table, crd-form, crd-table
|
||||
// - The stock-instance sidebars: api-form, api-table, builtin-form, builtin-table
|
||||
// - The stock-project sidebars: api-form, api-table, builtin-form, builtin-table, crd-form, crd-table
|
||||
//
|
||||
// Menu rules:
|
||||
// - The first section is "Marketplace" with two hardcoded entries:
|
||||
@@ -175,23 +176,23 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
|
||||
// Add hardcoded Backups section
|
||||
menuItems = append(menuItems, map[string]any{
|
||||
"key": "backups-category",
|
||||
"key": "backups",
|
||||
"label": "Backups",
|
||||
"children": []any{
|
||||
map[string]any{
|
||||
"key": "plans",
|
||||
"label": "Plans",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/api-table/backups.cozystack.io/v1alpha1/plans",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/plans",
|
||||
},
|
||||
map[string]any{
|
||||
"key": "backupjobs",
|
||||
"label": "BackupJobs",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backupjobs",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backupjobs",
|
||||
},
|
||||
map[string]any{
|
||||
"key": "backups",
|
||||
"label": "Backups",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backups",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backups",
|
||||
},
|
||||
},
|
||||
})
|
||||
@@ -214,7 +215,7 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
map[string]any{
|
||||
"key": "loadbalancer-services",
|
||||
"label": "External IPs",
|
||||
"link": "/openapi-ui/{cluster}/{namespace}/factory/external-ips",
|
||||
"link": "/openapi-ui/{clusterName}/{namespace}/factory/external-ips",
|
||||
},
|
||||
map[string]any{
|
||||
"key": "tenants",
|
||||
@@ -227,7 +228,13 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
// 6) Prepare the list of Sidebar IDs to upsert with the SAME content
|
||||
// Create sidebars for ALL CRDs with dashboard config
|
||||
targetIDs := []string{
|
||||
// stock-project sidebars (namespace-level, full menu)
|
||||
// stock-instance sidebars
|
||||
"stock-instance-api-form",
|
||||
"stock-instance-api-table",
|
||||
"stock-instance-builtin-form",
|
||||
"stock-instance-builtin-table",
|
||||
|
||||
// stock-project sidebars
|
||||
"stock-project-factory-marketplace",
|
||||
"stock-project-factory-workloadmonitor-details",
|
||||
"stock-project-factory-kube-service-details",
|
||||
@@ -243,11 +250,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
|
||||
"stock-project-builtin-table",
|
||||
"stock-project-crd-form",
|
||||
"stock-project-crd-table",
|
||||
// stock-instance sidebars (namespace-level pages after namespace is selected)
|
||||
"stock-instance-api-form",
|
||||
"stock-instance-api-table",
|
||||
"stock-instance-builtin-form",
|
||||
"stock-instance-builtin-table",
|
||||
}
|
||||
|
||||
// Add details sidebars for all CRDs with dashboard config
|
||||
|
||||
@@ -503,27 +503,18 @@ func CreateAllCustomFormsOverrides() []*dashboardv1alpha1.CustomFormsOverride {
|
||||
createFormItem("metadata.namespace", "Namespace", "text"),
|
||||
createFormItem("spec.applicationRef.kind", "Application Kind", "text"),
|
||||
createFormItem("spec.applicationRef.name", "Application Name", "text"),
|
||||
createFormItemWithAPI("spec.backupClassName", "Backup Class", "select", map[string]any{
|
||||
"api": map[string]any{
|
||||
"fetchUrl": "/api/clusters/{clusterName}/k8s/apis/backups.cozystack.io/v1alpha1/backupclasses",
|
||||
"pathToItems": []any{"items"},
|
||||
"pathToValue": []any{"metadata", "name"},
|
||||
"pathToLabel": []any{"metadata", "name"},
|
||||
"clusterNameVar": "clusterName",
|
||||
},
|
||||
}),
|
||||
createFormItem("spec.schedule.type", "Schedule Type", "text"),
|
||||
createFormItem("spec.schedule.cron", "Schedule Cron", "text"),
|
||||
},
|
||||
"schema": createSchema(map[string]any{
|
||||
"backupClassName": listInputScemaItemBackupClass(),
|
||||
}),
|
||||
}),
|
||||
|
||||
// BackupJobs form override - backups.cozystack.io/v1alpha1
|
||||
createCustomFormsOverride("default-/backups.cozystack.io/v1alpha1/backupjobs", map[string]any{
|
||||
"formItems": []any{
|
||||
createFormItem("metadata.name", "Name", "text"),
|
||||
createFormItem("metadata.namespace", "Namespace", "text"),
|
||||
createFormItem("spec.planRef.name", "Plan Name (optional)", "text"),
|
||||
createFormItem("spec.applicationRef.apiGroup", "Application API Group", "text"),
|
||||
createFormItem("spec.applicationRef.kind", "Application Kind", "text"),
|
||||
createFormItem("spec.applicationRef.name", "Application Name", "text"),
|
||||
},
|
||||
"schema": createSchema(map[string]any{
|
||||
"backupClassName": listInputScemaItemBackupClass(),
|
||||
}),
|
||||
}),
|
||||
}
|
||||
}
|
||||
@@ -2051,9 +2042,9 @@ func createCustomFormsOverride(customizationId string, spec map[string]any) *das
|
||||
"strategy": "merge",
|
||||
}
|
||||
|
||||
// Merge into newSpec caller-provided fields without: customizationId, hidden, strategy
|
||||
// Merge caller-provided fields (like formItems) into newSpec
|
||||
for key, value := range spec {
|
||||
if key != "customizationId" && key != "hidden" && key != "strategy" {
|
||||
if key != "customizationId" && key != "hidden" && key != "schema" && key != "strategy" {
|
||||
newSpec[key] = value
|
||||
}
|
||||
}
|
||||
@@ -2098,28 +2089,6 @@ func createNavigation(name string, spec map[string]any) *dashboardv1alpha1.Navig
|
||||
}
|
||||
}
|
||||
|
||||
func listInputScemaItemBackupClass() map[string]any {
|
||||
return map[string]any{
|
||||
"type": "listInput",
|
||||
"customProps": map[string]any{
|
||||
"valueUri": "/api/clusters/{cluster}/k8s/apis/backups.cozystack.io/v1alpha1/backupclasses",
|
||||
"keysToValue": []any{"metadata", "name"},
|
||||
"keysToLabel": []any{"metadata", "name"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// backupClassSchema returns the schema for spec.backupClassName as listInput (BackupJob/Plan).
|
||||
func createSchema(customProps map[string]any) map[string]any {
|
||||
return map[string]any{
|
||||
"properties": map[string]any{
|
||||
"spec": map[string]any{
|
||||
"properties": customProps,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// createFormItem creates a form item for CustomFormsOverride
|
||||
func createFormItem(path, label, fieldType string) map[string]any {
|
||||
return map[string]any{
|
||||
|
||||
@@ -17,13 +17,3 @@ spec:
|
||||
bucketClaimName: {{ .Release.Name }}
|
||||
credentialsSecretName: {{ .Release.Name }}
|
||||
protocol: s3
|
||||
---
|
||||
apiVersion: objectstorage.k8s.io/v1alpha1
|
||||
kind: BucketAccess
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-readonly
|
||||
spec:
|
||||
bucketAccessClassName: {{ $seaweedfs }}-readonly
|
||||
bucketClaimName: {{ .Release.Name }}
|
||||
credentialsSecretName: {{ .Release.Name }}-readonly
|
||||
protocol: s3
|
||||
|
||||
@@ -10,7 +10,6 @@ rules:
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
- {{ .Release.Name }}-credentials
|
||||
- {{ .Release.Name }}-readonly
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.0.0@sha256:3753b735b0315bee90de54cb25cfebc63bd2cc90ad11ca4fdc0e70439abd5096
|
||||
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.0.0@sha256:7deeee117e7eec599cb453836ca95eadd131dfc8c875dc457ef29dc1433395e0
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:434aa3b8e2a3cbf6681426b174e1c4fde23bafd12a6cccd046b5cb1749092ec4
|
||||
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:604561e23df1b8eb25c24cf73fd93c7aaa6d1e7c56affbbda5c6f0f83424e4b1
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.35@sha256:39f626c802dd84f95720ffb54fcd80dfb8a58ac280498870d0a1aa30d4252f94
|
||||
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.33@sha256:19ee4c76f0b3b7b40b97995ca78988ad8c82f6e9c75288d8b7b4b88a64f75d50
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
{{- $targetTenant := .Values._namespace.monitoring }}
|
||||
{{- $clusterDomain := (index .Values._cluster "cluster-domain") | default "cozy.local" }}
|
||||
{{- if .Values.addons.monitoringAgents.enabled }}
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
@@ -50,7 +49,7 @@ spec:
|
||||
cluster: {{ .Release.Name }}
|
||||
tenant: {{ .Release.Namespace }}
|
||||
remoteWrite:
|
||||
url: http://vminsert-shortterm.{{ $targetTenant }}.svc.{{ $clusterDomain }}:8480/insert/0/prometheus
|
||||
url: http://vminsert-shortterm.{{ $targetTenant }}.svc:8480/insert/0/prometheus
|
||||
fluent-bit:
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
@@ -73,7 +72,7 @@ spec:
|
||||
[OUTPUT]
|
||||
Name http
|
||||
Match kube.*
|
||||
Host vlogs-generic.{{ $targetTenant }}.svc.{{ $clusterDomain }}
|
||||
Host vlogs-generic.{{ $targetTenant }}.svc
|
||||
port 9428
|
||||
compress gzip
|
||||
uri /insert/jsonline?_stream_fields=stream,kubernetes_pod_name,kubernetes_container_name,kubernetes_namespace_name&_msg_field=log&_time_field=date
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
.helmignore
|
||||
/logos
|
||||
/Makefile
|
||||
@@ -1,7 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: openbao
|
||||
description: Managed OpenBAO secrets management service
|
||||
icon: /logos/openbao.svg
|
||||
type: application
|
||||
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
|
||||
appVersion: "2.5.0"
|
||||
@@ -1,5 +0,0 @@
|
||||
include ../../../hack/package.mk
|
||||
|
||||
generate:
|
||||
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
|
||||
../../../hack/update-crd.sh
|
||||
@@ -1,27 +0,0 @@
|
||||
# Managed OpenBAO Service
|
||||
|
||||
OpenBAO is an open-source secrets management solution forked from HashiCorp Vault.
|
||||
It provides identity-based secrets and encryption management for cloud infrastructure.
|
||||
|
||||
## Parameters
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | ------- |
|
||||
| `replicas` | Number of OpenBAO replicas. HA with Raft is automatically enabled when replicas > 1. Switching between standalone (file storage) and HA (Raft storage) modes requires data migration. | `int` | `1` |
|
||||
| `resources` | Explicit CPU and memory configuration for each OpenBAO replica. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `resources.cpu` | CPU available to each replica. | `quantity` | `""` |
|
||||
| `resources.memory` | Memory (RAM) available to each replica. | `quantity` | `""` |
|
||||
| `resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `size` | Persistent Volume Claim size for data storage. | `quantity` | `10Gi` |
|
||||
| `storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
| `external` | Enable external access from outside the cluster. | `bool` | `false` |
|
||||
|
||||
|
||||
### Application-specific parameters
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ---- | -------------------------- | ------ | ------ |
|
||||
| `ui` | Enable the OpenBAO web UI. | `bool` | `true` |
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -1,11 +0,0 @@
|
||||
<svg width="144" height="144" viewBox="0 0 144 144" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<rect width="144" height="144" rx="24" fill="url(#paint0_linear)"/>
|
||||
<rect width="144" height="144" rx="24" fill="black" fill-opacity="0.3"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M72 30C53.222 30 38 45.222 38 64v8c-3.314 0-6 2.686-6 6v30c0 3.314 2.686 6 6 6h68c3.314 0 6-2.686 6-6V78c0-3.314-2.686-6-6-6v-8C106 45.222 90.778 30 72 30zm-8 42v-8c0-4.418 3.582-8 8-8s8 3.582 8 8v8H64zm26 0v-8c0-8.837-7.163-16-16-16s-16 7.163-16 16v8h-2v28h60V72H90zm-22 14a4 4 0 118 0 4 4 0 01-8 0zm4-8a8 8 0 100 16 8 8 0 000-16z" fill="white"/>
|
||||
<defs>
|
||||
<linearGradient id="paint0_linear" x1="10" y1="15.5" x2="144" y2="131.5" gradientUnits="userSpaceOnUse">
|
||||
<stop stop-color="#87d6be"/>
|
||||
<stop offset="1" stop-color="#79c0ab"/>
|
||||
</linearGradient>
|
||||
</defs>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 852 B |
@@ -1,49 +0,0 @@
|
||||
{{/*
|
||||
Copyright Broadcom, Inc. All Rights Reserved.
|
||||
SPDX-License-Identifier: APACHE-2.0
|
||||
*/}}
|
||||
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
|
||||
{{/*
|
||||
Return a resource request/limit object based on a given preset.
|
||||
These presets are for basic testing and not meant to be used in production
|
||||
{{ include "resources.preset" (dict "type" "nano") -}}
|
||||
*/}}
|
||||
{{- define "resources.preset" -}}
|
||||
{{- $presets := dict
|
||||
"nano" (dict
|
||||
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"micro" (dict
|
||||
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"small" (dict
|
||||
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"medium" (dict
|
||||
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"large" (dict
|
||||
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"xlarge" (dict
|
||||
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
"2xlarge" (dict
|
||||
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
|
||||
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
|
||||
)
|
||||
}}
|
||||
{{- if hasKey $presets .type -}}
|
||||
{{- index $presets .type | toYaml -}}
|
||||
{{- else -}}
|
||||
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
@@ -1,31 +0,0 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
- {{ .Release.Name }}-internal
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups:
|
||||
- cozystack.io
|
||||
resources:
|
||||
- workloadmonitors
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
subjects:
|
||||
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -1,99 +0,0 @@
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-system
|
||||
labels:
|
||||
sharding.fluxcd.io/key: tenants
|
||||
spec:
|
||||
chartRef:
|
||||
kind: ExternalArtifact
|
||||
name: cozystack-openbao-application-default-openbao-system
|
||||
namespace: cozy-system
|
||||
interval: 5m
|
||||
timeout: 10m
|
||||
install:
|
||||
remediation:
|
||||
retries: -1
|
||||
upgrade:
|
||||
force: true
|
||||
remediation:
|
||||
retries: -1
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: cozystack-values
|
||||
values:
|
||||
openbao:
|
||||
fullnameOverride: {{ .Release.Name }}
|
||||
global:
|
||||
tlsDisable: true
|
||||
server:
|
||||
podManagementPolicy: Parallel
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 10 }}
|
||||
dataStorage:
|
||||
enabled: true
|
||||
size: {{ .Values.size }}
|
||||
{{- with .Values.storageClass }}
|
||||
storageClass: {{ . }}
|
||||
{{- end }}
|
||||
{{- if gt (int .Values.replicas) 1 }}
|
||||
standalone:
|
||||
enabled: false
|
||||
ha:
|
||||
enabled: true
|
||||
replicas: {{ .Values.replicas }}
|
||||
raft:
|
||||
enabled: true
|
||||
setNodeId: true
|
||||
config: |
|
||||
ui = {{ .Values.ui }}
|
||||
|
||||
listener "tcp" {
|
||||
address = "[::]:8200"
|
||||
cluster_address = "[::]:8201"
|
||||
tls_disable = true
|
||||
}
|
||||
|
||||
storage "raft" {
|
||||
path = "/openbao/data"
|
||||
{{- range $i := until (int $.Values.replicas) }}
|
||||
retry_join {
|
||||
leader_api_addr = "http://{{ $.Release.Name }}-{{ $i }}.{{ $.Release.Name }}-internal:8200"
|
||||
}
|
||||
{{- end }}
|
||||
}
|
||||
|
||||
service_registration "kubernetes" {}
|
||||
{{- else }}
|
||||
standalone:
|
||||
enabled: true
|
||||
config: |
|
||||
ui = {{ .Values.ui }}
|
||||
|
||||
listener "tcp" {
|
||||
address = "[::]:8200"
|
||||
cluster_address = "[::]:8201"
|
||||
tls_disable = true
|
||||
}
|
||||
|
||||
storage "file" {
|
||||
path = "/openbao/data"
|
||||
}
|
||||
# Note: service_registration "kubernetes" {} is intentionally omitted
|
||||
# in standalone mode — it requires an HA-capable storage backend and
|
||||
# causes a fatal error with storage "file".
|
||||
ha:
|
||||
enabled: false
|
||||
{{- end }}
|
||||
{{- if .Values.external }}
|
||||
service:
|
||||
type: LoadBalancer
|
||||
{{- end }}
|
||||
ui:
|
||||
enabled: {{ .Values.ui }}
|
||||
{{- if .Values.external }}
|
||||
serviceType: LoadBalancer
|
||||
{{- end }}
|
||||
injector:
|
||||
enabled: false
|
||||
csi:
|
||||
enabled: false
|
||||
@@ -1,13 +0,0 @@
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
minReplicas: 1
|
||||
kind: openbao
|
||||
type: openbao
|
||||
selector:
|
||||
app.kubernetes.io/instance: {{ $.Release.Name }}-system
|
||||
version: {{ $.Chart.Version }}
|
||||
@@ -1,87 +0,0 @@
|
||||
{
|
||||
"title": "Chart Values",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"external": {
|
||||
"description": "Enable external access from outside the cluster.",
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Number of OpenBAO replicas. HA with Raft is automatically enabled when replicas \u003e 1. Switching between standalone (file storage) and HA (Raft storage) modes requires data migration.",
|
||||
"type": "integer",
|
||||
"default": 1
|
||||
},
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration for each OpenBAO replica. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "CPU available to each replica.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Memory (RAM) available to each replica.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted.",
|
||||
"type": "string",
|
||||
"default": "small",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"size": {
|
||||
"description": "Persistent Volume Claim size for data storage.",
|
||||
"default": "10Gi",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"storageClass": {
|
||||
"description": "StorageClass used to store the data.",
|
||||
"type": "string",
|
||||
"default": ""
|
||||
},
|
||||
"ui": {
|
||||
"description": "Enable the OpenBAO web UI.",
|
||||
"type": "boolean",
|
||||
"default": true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
##
|
||||
## @section Common parameters
|
||||
##
|
||||
|
||||
## @typedef {struct} Resources - Explicit CPU and memory configuration for each OpenBAO replica.
|
||||
## @field {quantity} [cpu] - CPU available to each replica.
|
||||
## @field {quantity} [memory] - Memory (RAM) available to each replica.
|
||||
|
||||
## @enum {string} ResourcesPreset - Default sizing preset.
|
||||
## @value nano
|
||||
## @value micro
|
||||
## @value small
|
||||
## @value medium
|
||||
## @value large
|
||||
## @value xlarge
|
||||
## @value 2xlarge
|
||||
|
||||
## @param {int} replicas - Number of OpenBAO replicas. HA with Raft is automatically enabled when replicas > 1. Switching between standalone (file storage) and HA (Raft storage) modes requires data migration.
|
||||
replicas: 1
|
||||
|
||||
## @param {Resources} [resources] - Explicit CPU and memory configuration for each OpenBAO replica. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
resources: {}
|
||||
|
||||
## @param {ResourcesPreset} resourcesPreset="small" - Default sizing preset used when `resources` is omitted.
|
||||
resourcesPreset: "small"
|
||||
|
||||
## @param {quantity} size - Persistent Volume Claim size for data storage.
|
||||
size: 10Gi
|
||||
|
||||
## @param {string} storageClass - StorageClass used to store the data.
|
||||
storageClass: ""
|
||||
|
||||
## @param {bool} external - Enable external access from outside the cluster.
|
||||
external: false
|
||||
|
||||
##
|
||||
## @section Application-specific parameters
|
||||
##
|
||||
|
||||
## @param {bool} ui - Enable the OpenBAO web UI.
|
||||
ui: true
|
||||
@@ -4,4 +4,4 @@ description: Managed RabbitMQ service
|
||||
icon: /logos/rabbitmq.svg
|
||||
type: application
|
||||
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
|
||||
appVersion: "4.2.4"
|
||||
appVersion: "3.13.2"
|
||||
|
||||
@@ -3,7 +3,3 @@ include ../../../hack/package.mk
|
||||
generate:
|
||||
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md
|
||||
../../../hack/update-crd.sh
|
||||
|
||||
update:
|
||||
hack/update-versions.sh
|
||||
make generate
|
||||
|
||||
@@ -23,7 +23,6 @@ The service utilizes official RabbitMQ operator. This ensures the reliability an
|
||||
| `size` | Persistent Volume Claim size available for application data. | `quantity` | `10Gi` |
|
||||
| `storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
| `external` | Enable external access from outside the cluster. | `bool` | `false` |
|
||||
| `version` | RabbitMQ major.minor version to deploy | `string` | `v4.2` |
|
||||
|
||||
|
||||
### Application-specific parameters
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
"v4.2": "4.2.4"
|
||||
"v4.1": "4.1.8"
|
||||
"v4.0": "4.0.9"
|
||||
"v3.13": "3.13.7"
|
||||
@@ -1,129 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
RABBITMQ_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||
VALUES_FILE="${RABBITMQ_DIR}/values.yaml"
|
||||
VERSIONS_FILE="${RABBITMQ_DIR}/files/versions.yaml"
|
||||
GITHUB_API_URL="https://api.github.com/repos/rabbitmq/rabbitmq-server/releases"
|
||||
|
||||
# Check if jq is installed
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo "Error: jq is not installed. Please install jq and try again." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Fetch releases from GitHub API
|
||||
echo "Fetching releases from GitHub API..."
|
||||
RELEASES_JSON=$(curl -sSL "${GITHUB_API_URL}?per_page=100")
|
||||
|
||||
if [ -z "$RELEASES_JSON" ]; then
|
||||
echo "Error: Could not fetch releases from GitHub API" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract stable release tags (format: v3.13.7, v4.0.3, etc.)
|
||||
# Filter out pre-releases and draft releases
|
||||
RELEASE_TAGS=$(echo "$RELEASES_JSON" | jq -r '.[] | select(.prerelease == false) | select(.draft == false) | .tag_name' | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | sort -V)
|
||||
|
||||
if [ -z "$RELEASE_TAGS" ]; then
|
||||
echo "Error: Could not find any stable release tags" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Found release tags: $(echo "$RELEASE_TAGS" | tr '\n' ' ')"
|
||||
|
||||
# Supported major.minor versions (newest first)
|
||||
# We support the last few minor releases of each active major
|
||||
SUPPORTED_MAJORS=("4.2" "4.1" "4.0" "3.13")
|
||||
|
||||
# Build versions map: major.minor -> latest patch version
|
||||
declare -A VERSION_MAP
|
||||
MAJOR_VERSIONS=()
|
||||
|
||||
for major_minor in "${SUPPORTED_MAJORS[@]}"; do
|
||||
# Find the latest patch version for this major.minor
|
||||
MATCHING=$(echo "$RELEASE_TAGS" | grep -E "^v${major_minor//./\\.}\.[0-9]+$" | tail -n1)
|
||||
|
||||
if [ -n "$MATCHING" ]; then
|
||||
# Strip the 'v' prefix for the value (Docker tag format is e.g. 3.13.7)
|
||||
TAG_VERSION="${MATCHING#v}"
|
||||
VERSION_MAP["v${major_minor}"]="${TAG_VERSION}"
|
||||
MAJOR_VERSIONS+=("v${major_minor}")
|
||||
echo "Found version: v${major_minor} -> ${TAG_VERSION}"
|
||||
else
|
||||
echo "Warning: No stable releases found for ${major_minor}, skipping..." >&2
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#MAJOR_VERSIONS[@]} -eq 0 ]; then
|
||||
echo "Error: No matching versions found" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Major versions to add: ${MAJOR_VERSIONS[*]}"
|
||||
|
||||
# Create/update versions.yaml file
|
||||
echo "Updating $VERSIONS_FILE..."
|
||||
{
|
||||
for major_ver in "${MAJOR_VERSIONS[@]}"; do
|
||||
echo "\"${major_ver}\": \"${VERSION_MAP[$major_ver]}\""
|
||||
done
|
||||
} > "$VERSIONS_FILE"
|
||||
|
||||
echo "Successfully updated $VERSIONS_FILE"
|
||||
|
||||
# Update values.yaml - enum with major.minor versions only
|
||||
TEMP_FILE=$(mktemp)
|
||||
trap "rm -f $TEMP_FILE" EXIT
|
||||
|
||||
# Build new version section
|
||||
NEW_VERSION_SECTION="## @enum {string} Version"
|
||||
for major_ver in "${MAJOR_VERSIONS[@]}"; do
|
||||
NEW_VERSION_SECTION="${NEW_VERSION_SECTION}
|
||||
## @value $major_ver"
|
||||
done
|
||||
NEW_VERSION_SECTION="${NEW_VERSION_SECTION}
|
||||
|
||||
## @param {Version} version - RabbitMQ major.minor version to deploy
|
||||
version: ${MAJOR_VERSIONS[0]}"
|
||||
|
||||
# Check if version section already exists
|
||||
if grep -q "^## @enum {string} Version" "$VALUES_FILE"; then
|
||||
# Version section exists, update it using awk
|
||||
echo "Updating existing version section in $VALUES_FILE..."
|
||||
|
||||
awk -v new_section="$NEW_VERSION_SECTION" '
|
||||
/^## @enum {string} Version/ {
|
||||
in_section = 1
|
||||
print new_section
|
||||
next
|
||||
}
|
||||
in_section && /^version: / {
|
||||
in_section = 0
|
||||
next
|
||||
}
|
||||
in_section {
|
||||
next
|
||||
}
|
||||
{ print }
|
||||
' "$VALUES_FILE" > "$TEMP_FILE.tmp"
|
||||
mv "$TEMP_FILE.tmp" "$VALUES_FILE"
|
||||
else
|
||||
# Version section doesn't exist, insert it before Application-specific parameters section
|
||||
echo "Inserting new version section in $VALUES_FILE..."
|
||||
|
||||
awk -v new_section="$NEW_VERSION_SECTION" '
|
||||
/^## @section Application-specific parameters/ {
|
||||
print new_section
|
||||
print ""
|
||||
}
|
||||
{ print }
|
||||
' "$VALUES_FILE" > "$TEMP_FILE.tmp"
|
||||
mv "$TEMP_FILE.tmp" "$VALUES_FILE"
|
||||
fi
|
||||
|
||||
echo "Successfully updated $VALUES_FILE with major.minor versions: ${MAJOR_VERSIONS[*]}"
|
||||
@@ -1,8 +0,0 @@
|
||||
{{- define "rabbitmq.versionMap" }}
|
||||
{{- $versionMap := .Files.Get "files/versions.yaml" | fromYaml }}
|
||||
{{- if not (hasKey $versionMap .Values.version) }}
|
||||
{{- printf `RabbitMQ version %s is not supported, allowed versions are %s` $.Values.version (keys $versionMap) | fail }}
|
||||
{{- end }}
|
||||
{{- index $versionMap .Values.version }}
|
||||
{{- end }}
|
||||
|
||||
@@ -7,7 +7,6 @@ metadata:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
image: 'rabbitmq:{{ include "rabbitmq.versionMap" $ }}-management'
|
||||
{{- if .Values.external }}
|
||||
service:
|
||||
type: LoadBalancer
|
||||
|
||||
@@ -92,17 +92,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": {
|
||||
"description": "RabbitMQ major.minor version to deploy",
|
||||
"type": "string",
|
||||
"default": "v4.2",
|
||||
"enum": [
|
||||
"v4.2",
|
||||
"v4.1",
|
||||
"v4.0",
|
||||
"v3.13"
|
||||
]
|
||||
},
|
||||
"vhosts": {
|
||||
"description": "Virtual hosts configuration map.",
|
||||
"type": "object",
|
||||
|
||||
@@ -34,15 +34,6 @@ storageClass: ""
|
||||
external: false
|
||||
|
||||
##
|
||||
## @enum {string} Version
|
||||
## @value v4.2
|
||||
## @value v4.1
|
||||
## @value v4.0
|
||||
## @value v3.13
|
||||
|
||||
## @param {Version} version - RabbitMQ major.minor version to deploy
|
||||
version: v4.2
|
||||
|
||||
## @section Application-specific parameters
|
||||
##
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ spec:
|
||||
name: cozystack-etcd-application-default-etcd
|
||||
namespace: cozy-system
|
||||
interval: 5m
|
||||
timeout: 30m
|
||||
timeout: 10m
|
||||
install:
|
||||
remediation:
|
||||
retries: -1
|
||||
|
||||
@@ -10,8 +10,6 @@ metadata:
|
||||
labels:
|
||||
cozystack.io/system: "true"
|
||||
pod-security.kubernetes.io/enforce: privileged
|
||||
annotations:
|
||||
helm.sh/resource-policy: keep
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
cozystackOperator:
|
||||
# Deployment variant: talos, generic, hosted
|
||||
variant: talos
|
||||
image: ghcr.io/cozystack/cozystack/cozystack-operator:v1.0.0@sha256:9e5229764b6077809a1c16566881a524c33e8986e36597e6833f8857a7e6a335
|
||||
image: ghcr.io/cozystack/cozystack/cozystack-operator:v1.0.0-beta.6@sha256:c7490da9c1ccb51bff4dd5657ca6a33a29ac71ad9861dfa8c72fdfc8b5765b93
|
||||
platformSourceUrl: 'oci://ghcr.io/cozystack/cozystack/cozystack-packages'
|
||||
platformSourceRef: 'digest=sha256:ef3e4ba7d21572a61794d8be594805f063aa04f4a8c3753351fc89c7804d337e'
|
||||
platformSourceRef: 'digest=sha256:b29b87d1a2b80452ffd4db7516a102c30c55121552dcdb237055d4124d12c55d'
|
||||
# Generic variant configuration (only used when cozystackOperator.variant=generic)
|
||||
cozystack:
|
||||
# Kubernetes API server host (IP only, no protocol/port)
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
# Migration 26 --> 27
|
||||
# Migrate monitoring resources from extra/monitoring to system/monitoring
|
||||
# This migration re-labels resources so they become owned by monitoring-system HelmRelease
|
||||
# and deletes old helm release secrets so that helm does not diff old vs new chart manifests.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
@@ -36,39 +35,10 @@ relabel_resources() {
|
||||
done
|
||||
}
|
||||
|
||||
# Delete all helm release secrets for a given release name in a namespace.
|
||||
# Uses both label selector and name-pattern matching to ensure complete cleanup.
|
||||
delete_helm_secrets() {
|
||||
local ns="$1"
|
||||
local release="$2"
|
||||
|
||||
# Primary: delete by label selector
|
||||
kubectl delete secrets -n "$ns" -l "name=${release},owner=helm" --ignore-not-found
|
||||
|
||||
# Fallback: find and delete by name pattern (in case labels were modified)
|
||||
local remaining
|
||||
remaining=$(kubectl get secrets -n "$ns" -o name | { grep "^secret/sh\.helm\.release\.v1\.${release}\." || true; })
|
||||
if [ -n "$remaining" ]; then
|
||||
echo " Found secrets not matched by label selector, deleting by name..."
|
||||
echo "$remaining" | while IFS= read -r secret; do
|
||||
echo " Deleting $secret"
|
||||
kubectl delete -n "$ns" "$secret" --ignore-not-found
|
||||
done
|
||||
fi
|
||||
|
||||
# Verify all secrets are gone
|
||||
remaining=$(kubectl get secrets -n "$ns" -o name | { grep "^secret/sh\.helm\.release\.v1\.${release}\." || true; })
|
||||
if [ -n "$remaining" ]; then
|
||||
echo " ERROR: Failed to delete helm release secrets:"
|
||||
echo "$remaining"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Find all tenant namespaces with monitoring HelmRelease
|
||||
echo "Finding tenant namespaces with monitoring HelmRelease..."
|
||||
NAMESPACES=$(kubectl get hr --all-namespaces -l cozystack.io/ui=true --field-selector=metadata.name=monitoring \
|
||||
-o jsonpath='{range .items[*]}{.metadata.namespace}{"\n"}{end}' | sort -u)
|
||||
NAMESPACES=$(kubectl get hr --all-namespaces -l apps.cozystack.io/application.kind=Monitoring \
|
||||
-o jsonpath='{range .items[*]}{.metadata.namespace}{"\n"}{end}' 2>/dev/null | sort -u || true)
|
||||
|
||||
if [ -z "$NAMESPACES" ]; then
|
||||
echo "No monitoring HelmReleases found in tenant namespaces, skipping migration"
|
||||
@@ -96,7 +66,7 @@ for ns in $NAMESPACES; do
|
||||
# Step 1: Suspend the HelmRelease
|
||||
echo ""
|
||||
echo "Step 1: Suspending HelmRelease monitoring..."
|
||||
kubectl patch hr -n "$ns" monitoring --type=merge -p '{"spec":{"suspend":true}}'
|
||||
kubectl patch hr -n "$ns" monitoring --type=merge -p '{"spec":{"suspend":true}}' 2>/dev/null || true
|
||||
|
||||
# Wait a moment for reconciliation to stop
|
||||
sleep 2
|
||||
@@ -104,7 +74,7 @@ for ns in $NAMESPACES; do
|
||||
# Step 2: Delete helm secrets for the monitoring release
|
||||
echo ""
|
||||
echo "Step 2: Deleting helm secrets for monitoring release..."
|
||||
delete_helm_secrets "$ns" "monitoring"
|
||||
kubectl delete secrets -n "$ns" -l name=monitoring,owner=helm --ignore-not-found
|
||||
|
||||
# Step 3: Relabel resources to be owned by monitoring-system
|
||||
echo ""
|
||||
@@ -151,9 +121,7 @@ for ns in $NAMESPACES; do
|
||||
echo "Processing Cozystack resources..."
|
||||
relabel_resources "$ns" "workloadmonitors.cozystack.io"
|
||||
|
||||
# Step 4: Delete the suspended HelmRelease
|
||||
# Helm secrets are already gone, so flux finalizer will find no release to uninstall
|
||||
# and will simply remove the finalizer without deleting any resources.
|
||||
# Step 4: Delete the suspended HelmRelease (Flux won't delete resources when HR is suspended)
|
||||
echo ""
|
||||
echo "Step 4: Deleting suspended HelmRelease monitoring..."
|
||||
kubectl delete hr -n "$ns" monitoring --ignore-not-found
|
||||
|
||||
@@ -5,24 +5,10 @@ set -euo pipefail
|
||||
|
||||
# Migrate Piraeus CRDs to piraeus-operator-crds Helm release
|
||||
for crd in linstorclusters.piraeus.io linstornodeconnections.piraeus.io linstorsatelliteconfigurations.piraeus.io linstorsatellites.piraeus.io; do
|
||||
if kubectl get crd "$crd" >/dev/null 2>&1; then
|
||||
echo " Relabeling CRD $crd"
|
||||
kubectl annotate crd "$crd" meta.helm.sh/release-namespace=cozy-linstor meta.helm.sh/release-name=piraeus-operator-crds --overwrite
|
||||
kubectl label crd "$crd" app.kubernetes.io/managed-by=Helm helm.toolkit.fluxcd.io/namespace=cozy-linstor helm.toolkit.fluxcd.io/name=piraeus-operator-crds --overwrite
|
||||
else
|
||||
echo " CRD $crd not found, skipping"
|
||||
fi
|
||||
kubectl annotate crd "$crd" meta.helm.sh/release-namespace=cozy-linstor meta.helm.sh/release-name=piraeus-operator-crds --overwrite
|
||||
kubectl label crd "$crd" app.kubernetes.io/managed-by=Helm helm.toolkit.fluxcd.io/namespace=cozy-linstor helm.toolkit.fluxcd.io/name=piraeus-operator-crds --overwrite
|
||||
done
|
||||
|
||||
# Delete old piraeus-operator helm secrets (by label and by name pattern)
|
||||
kubectl delete secret -n cozy-linstor -l name=piraeus-operator,owner=helm --ignore-not-found
|
||||
remaining=$(kubectl get secrets -n cozy-linstor -o name 2>/dev/null | { grep "^secret/sh\.helm\.release\.v1\.piraeus-operator\." || true; })
|
||||
if [ -n "$remaining" ]; then
|
||||
echo " Deleting remaining piraeus-operator helm secrets by name..."
|
||||
echo "$remaining" | while IFS= read -r secret; do
|
||||
kubectl delete -n cozy-linstor "$secret" --ignore-not-found
|
||||
done
|
||||
fi
|
||||
|
||||
# Stamp version
|
||||
kubectl create configmap -n cozy-system cozystack-version \
|
||||
|
||||
@@ -348,7 +348,7 @@ PVCEOF
|
||||
# --- 3g: Clone Secrets ---
|
||||
echo " --- Clone Secrets ---"
|
||||
for secret in $(kubectl -n "$NAMESPACE" get secret -o name 2>/dev/null \
|
||||
| { grep "secret/${OLD_NAME}" || true; } | { grep -v "sh.helm.release" || true; }); do
|
||||
| grep "secret/${OLD_NAME}" | grep -v "sh.helm.release"); do
|
||||
old_secret_name="${secret#secret/}"
|
||||
new_secret_name="${NEW_NAME}${old_secret_name#${OLD_NAME}}"
|
||||
clone_resource "$NAMESPACE" "secret" "$old_secret_name" "$new_secret_name" "$OLD_NAME" "$NEW_NAME"
|
||||
@@ -357,7 +357,7 @@ PVCEOF
|
||||
# --- 3h: Clone ConfigMaps ---
|
||||
echo " --- Clone ConfigMaps ---"
|
||||
for cm in $(kubectl -n "$NAMESPACE" get configmap -o name 2>/dev/null \
|
||||
| { grep "configmap/${OLD_NAME}" || true; }); do
|
||||
| grep "configmap/${OLD_NAME}"); do
|
||||
old_cm_name="${cm#configmap/}"
|
||||
new_cm_name="${NEW_NAME}${old_cm_name#${OLD_NAME}}"
|
||||
clone_resource "$NAMESPACE" "configmap" "$old_cm_name" "$new_cm_name" "$OLD_NAME" "$NEW_NAME"
|
||||
@@ -468,13 +468,13 @@ PVCEOF
|
||||
fi
|
||||
|
||||
for secret in $(kubectl -n "$NAMESPACE" get secret -o name 2>/dev/null \
|
||||
| { grep "secret/${OLD_NAME}" || true; } | { grep -v "sh.helm.release" || true; }); do
|
||||
| grep "secret/${OLD_NAME}" | grep -v "sh.helm.release"); do
|
||||
old_secret_name="${secret#secret/}"
|
||||
delete_resource "$NAMESPACE" "secret" "$old_secret_name"
|
||||
done
|
||||
|
||||
for cm in $(kubectl -n "$NAMESPACE" get configmap -o name 2>/dev/null \
|
||||
| { grep "configmap/${OLD_NAME}" || true; }); do
|
||||
| grep "configmap/${OLD_NAME}"); do
|
||||
old_cm_name="${cm#configmap/}"
|
||||
delete_resource "$NAMESPACE" "configmap" "$old_cm_name"
|
||||
done
|
||||
@@ -611,19 +611,6 @@ done
|
||||
echo ""
|
||||
echo "=== Migration complete (${#INSTANCES[@]} instance(s)) ==="
|
||||
|
||||
# ============================================================
|
||||
# STEP 8: Clean up orphaned mysql-rd system HelmRelease
|
||||
# ============================================================
|
||||
echo ""
|
||||
echo "--- Step 8: Clean up orphaned mysql-rd HelmRelease ---"
|
||||
if kubectl -n cozy-system get hr mysql-rd --no-headers 2>/dev/null | grep -q .; then
|
||||
echo " [DELETE] hr/mysql-rd"
|
||||
kubectl -n cozy-system delete hr mysql-rd --wait=false
|
||||
else
|
||||
echo " [SKIP] hr/mysql-rd already gone"
|
||||
fi
|
||||
kubectl -n cozy-system delete secret -l "owner=helm,name=mysql-rd" --ignore-not-found
|
||||
|
||||
# Stamp version
|
||||
kubectl create configmap -n cozy-system cozystack-version \
|
||||
--from-literal=version=29 --dry-run=client -o yaml | kubectl apply -f-
|
||||
|
||||
@@ -9,6 +9,8 @@ set -euo pipefail
|
||||
OLD_PREFIX="virtual-machine"
|
||||
NEW_DISK_PREFIX="vm-disk"
|
||||
NEW_INSTANCE_PREFIX="vm-instance"
|
||||
PROTECTION_WEBHOOK_NAME="protection-webhook"
|
||||
PROTECTION_WEBHOOK_NS="protection-webhook"
|
||||
CDI_APISERVER_NS="cozy-kubevirt-cdi"
|
||||
CDI_APISERVER_DEPLOY="cdi-apiserver"
|
||||
CDI_VALIDATING_WEBHOOKS="cdi-api-datavolume-validate cdi-api-dataimportcron-validate cdi-api-populator-validate cdi-api-validate"
|
||||
@@ -86,6 +88,7 @@ echo " Total: ${#INSTANCES[@]} instance(s)"
|
||||
# STEP 2: Migrate each instance
|
||||
# ============================================================
|
||||
ALL_PV_NAMES=()
|
||||
ALL_PROTECTED_RESOURCES=()
|
||||
|
||||
for entry in "${INSTANCES[@]}"; do
|
||||
NAMESPACE="${entry%%/*}"
|
||||
@@ -312,7 +315,7 @@ PVCEOF
|
||||
# --- 2i: Clone Secrets ---
|
||||
echo " --- Clone Secrets ---"
|
||||
kubectl -n "$NAMESPACE" get secret -o name 2>/dev/null \
|
||||
| { grep "secret/${OLD_NAME}" || true; } | { grep -v "sh.helm.release" || true; } | { grep -v "values" || true; } \
|
||||
| grep "secret/${OLD_NAME}" | grep -v "sh.helm.release" | grep -v "values" \
|
||||
| while IFS= read -r secret; do
|
||||
old_secret_name="${secret#secret/}"
|
||||
suffix="${old_secret_name#${OLD_NAME}}"
|
||||
@@ -539,7 +542,7 @@ SVCEOF
|
||||
# --- 2q: Delete old resources ---
|
||||
echo " --- Delete old resources ---"
|
||||
kubectl -n "$NAMESPACE" get secret -o name 2>/dev/null \
|
||||
| { grep "secret/${OLD_NAME}" || true; } | { grep -v "sh.helm.release" || true; } | { grep -v "values" || true; } \
|
||||
| grep "secret/${OLD_NAME}" | grep -v "sh.helm.release" | grep -v "values" \
|
||||
| while IFS= read -r secret; do
|
||||
old_secret_name="${secret#secret/}"
|
||||
delete_resource "$NAMESPACE" "secret" "$old_secret_name"
|
||||
@@ -561,17 +564,71 @@ SVCEOF
|
||||
delete_resource "$NAMESPACE" "secret" "$VALUES_SECRET"
|
||||
fi
|
||||
|
||||
# Delete old service (if exists)
|
||||
# Collect protected resources for batch deletion
|
||||
if resource_exists "$NAMESPACE" "svc" "$OLD_NAME"; then
|
||||
delete_resource "$NAMESPACE" "svc" "$OLD_NAME"
|
||||
ALL_PROTECTED_RESOURCES+=("${NAMESPACE}:svc/${OLD_NAME}")
|
||||
fi
|
||||
done
|
||||
|
||||
# ============================================================
|
||||
# STEP 3: Restore PV reclaim policies
|
||||
# STEP 3: Delete protected resources (Services)
|
||||
# ============================================================
|
||||
echo ""
|
||||
echo "--- Step 3: Restore PV reclaim policies ---"
|
||||
echo "--- Step 3: Delete protected resources ---"
|
||||
|
||||
if [ ${#ALL_PROTECTED_RESOURCES[@]} -gt 0 ]; then
|
||||
WEBHOOK_EXISTS=false
|
||||
if kubectl -n "$PROTECTION_WEBHOOK_NS" get deploy "$PROTECTION_WEBHOOK_NAME" --no-headers 2>/dev/null | grep -q .; then
|
||||
WEBHOOK_EXISTS=true
|
||||
fi
|
||||
|
||||
if [ "$WEBHOOK_EXISTS" = "true" ]; then
|
||||
echo " --- Temporarily disabling protection-webhook ---"
|
||||
|
||||
WEBHOOK_REPLICAS=$(kubectl -n "$PROTECTION_WEBHOOK_NS" get deploy "$PROTECTION_WEBHOOK_NAME" \
|
||||
-o jsonpath='{.spec.replicas}' 2>/dev/null || echo "1")
|
||||
|
||||
echo " [SCALE] ${PROTECTION_WEBHOOK_NAME} -> 0 (was ${WEBHOOK_REPLICAS})"
|
||||
kubectl -n "$PROTECTION_WEBHOOK_NS" scale deploy "$PROTECTION_WEBHOOK_NAME" --replicas=0
|
||||
|
||||
echo " [PATCH] Set failurePolicy=Ignore on ValidatingWebhookConfiguration/${PROTECTION_WEBHOOK_NAME}"
|
||||
kubectl get validatingwebhookconfiguration "$PROTECTION_WEBHOOK_NAME" -o json | \
|
||||
jq '.webhooks[].failurePolicy = "Ignore"' | \
|
||||
kubectl apply -f - 2>/dev/null || true
|
||||
|
||||
echo " Waiting for webhook pods to terminate..."
|
||||
kubectl -n "$PROTECTION_WEBHOOK_NS" wait --for=delete pod \
|
||||
-l app.kubernetes.io/name=protection-webhook --timeout=60s 2>/dev/null || true
|
||||
sleep 3
|
||||
fi
|
||||
|
||||
for entry in "${ALL_PROTECTED_RESOURCES[@]}"; do
|
||||
ns="${entry%%:*}"
|
||||
res="${entry#*:}"
|
||||
echo " [DELETE] ${ns}/${res}"
|
||||
kubectl -n "$ns" delete "$res" --wait=false 2>/dev/null || true
|
||||
done
|
||||
|
||||
if [ "$WEBHOOK_EXISTS" = "true" ]; then
|
||||
echo " [PATCH] Set failurePolicy=Fail on ValidatingWebhookConfiguration/${PROTECTION_WEBHOOK_NAME}"
|
||||
kubectl get validatingwebhookconfiguration "$PROTECTION_WEBHOOK_NAME" -o json | \
|
||||
jq '.webhooks[].failurePolicy = "Fail"' | \
|
||||
kubectl apply -f - 2>/dev/null || true
|
||||
|
||||
echo " [SCALE] ${PROTECTION_WEBHOOK_NAME} -> ${WEBHOOK_REPLICAS}"
|
||||
kubectl -n "$PROTECTION_WEBHOOK_NS" scale deploy "$PROTECTION_WEBHOOK_NAME" \
|
||||
--replicas="$WEBHOOK_REPLICAS"
|
||||
echo " --- protection-webhook restored ---"
|
||||
fi
|
||||
else
|
||||
echo " [SKIP] No protected resources to delete"
|
||||
fi
|
||||
|
||||
# ============================================================
|
||||
# STEP 4: Restore PV reclaim policies
|
||||
# ============================================================
|
||||
echo ""
|
||||
echo "--- Step 4: Restore PV reclaim policies ---"
|
||||
for pv_name in "${ALL_PV_NAMES[@]}"; do
|
||||
if [ -n "$pv_name" ]; then
|
||||
current_policy=$(kubectl get pv "$pv_name" \
|
||||
@@ -586,7 +643,7 @@ for pv_name in "${ALL_PV_NAMES[@]}"; do
|
||||
done
|
||||
|
||||
# ============================================================
|
||||
# STEP 4: Temporarily disable CDI datavolume webhooks
|
||||
# STEP 5: Temporarily disable CDI datavolume webhooks
|
||||
# ============================================================
|
||||
# CDI's datavolume-validate webhook rejects DataVolume creation when a PVC
|
||||
# with the same name already exists. We must disable it so that vm-disk
|
||||
@@ -595,7 +652,7 @@ done
|
||||
# cdi-apiserver (which serves the webhooks), then delete webhook configs.
|
||||
# Both are restored after vm-disk HRs reconcile.
|
||||
echo ""
|
||||
echo "--- Step 4: Temporarily disable CDI webhooks ---"
|
||||
echo "--- Step 5: Temporarily disable CDI webhooks ---"
|
||||
|
||||
CDI_OPERATOR_REPLICAS=$(kubectl -n "$CDI_APISERVER_NS" get deploy cdi-operator \
|
||||
-o jsonpath='{.spec.replicas}' 2>/dev/null || echo "1")
|
||||
@@ -628,10 +685,10 @@ done
|
||||
sleep 2
|
||||
|
||||
# ============================================================
|
||||
# STEP 5: Unsuspend vm-disk HelmReleases first
|
||||
# STEP 6: Unsuspend vm-disk HelmReleases first
|
||||
# ============================================================
|
||||
echo ""
|
||||
echo "--- Step 5: Unsuspend vm-disk HelmReleases ---"
|
||||
echo "--- Step 6: Unsuspend vm-disk HelmReleases ---"
|
||||
for entry in "${INSTANCES[@]}"; do
|
||||
ns="${entry%%/*}"
|
||||
instance="${entry#*/}"
|
||||
@@ -648,7 +705,7 @@ for entry in "${INSTANCES[@]}"; do
|
||||
# Force immediate reconciliation
|
||||
echo " [TRIGGER] Reconcile ${ns}/hr/${disk_name}"
|
||||
kubectl -n "$ns" annotate hr "$disk_name" --overwrite \
|
||||
"reconcile.fluxcd.io/requestedAt=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" 2>/dev/null || true
|
||||
"reconcile.fluxcd.io/requestedAt=$(date +%s)" 2>/dev/null || true
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -672,12 +729,12 @@ for entry in "${INSTANCES[@]}"; do
|
||||
done
|
||||
|
||||
# ============================================================
|
||||
# STEP 6: Restore CDI webhooks
|
||||
# STEP 7: Restore CDI webhooks
|
||||
# ============================================================
|
||||
# Scale cdi-operator and cdi-apiserver back up.
|
||||
# cdi-apiserver will recreate webhook configurations automatically on start.
|
||||
echo ""
|
||||
echo "--- Step 6: Restore CDI webhooks ---"
|
||||
echo "--- Step 7: Restore CDI webhooks ---"
|
||||
|
||||
echo " [SCALE] cdi-operator -> ${CDI_OPERATOR_REPLICAS}"
|
||||
kubectl -n "$CDI_APISERVER_NS" scale deploy cdi-operator \
|
||||
@@ -692,10 +749,10 @@ kubectl -n "$CDI_APISERVER_NS" rollout status deploy "$CDI_APISERVER_DEPLOY" --t
|
||||
echo " --- CDI webhooks restored ---"
|
||||
|
||||
# ============================================================
|
||||
# STEP 7: Unsuspend vm-instance HelmReleases
|
||||
# STEP 8: Unsuspend vm-instance HelmReleases
|
||||
# ============================================================
|
||||
echo ""
|
||||
echo "--- Step 7: Unsuspend vm-instance HelmReleases ---"
|
||||
echo "--- Step 8: Unsuspend vm-instance HelmReleases ---"
|
||||
for entry in "${INSTANCES[@]}"; do
|
||||
ns="${entry%%/*}"
|
||||
instance="${entry#*/}"
|
||||
@@ -715,19 +772,6 @@ done
|
||||
echo ""
|
||||
echo "=== Migration complete (${#INSTANCES[@]} instance(s)) ==="
|
||||
|
||||
# ============================================================
|
||||
# STEP 8: Clean up orphaned virtual-machine-rd system HelmRelease
|
||||
# ============================================================
|
||||
echo ""
|
||||
echo "--- Step 8: Clean up orphaned virtual-machine-rd HelmRelease ---"
|
||||
if kubectl -n cozy-system get hr virtual-machine-rd --no-headers 2>/dev/null | grep -q .; then
|
||||
echo " [DELETE] hr/virtual-machine-rd"
|
||||
kubectl -n cozy-system delete hr virtual-machine-rd --wait=false
|
||||
else
|
||||
echo " [SKIP] hr/virtual-machine-rd already gone"
|
||||
fi
|
||||
kubectl -n cozy-system delete secret -l "owner=helm,name=virtual-machine-rd" --ignore-not-found
|
||||
|
||||
# Stamp version
|
||||
kubectl create configmap -n cozy-system cozystack-version \
|
||||
--from-literal=version=30 --dry-run=client -o yaml | kubectl apply -f-
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Migration 33 --> 34
|
||||
# Clean up orphaned system -rd HelmReleases left after application renames.
|
||||
#
|
||||
# These HelmReleases reference ExternalArtifacts that no longer exist:
|
||||
# ferretdb-rd -> replaced by mongodb-rd
|
||||
# mysql-rd -> replaced by mariadb-rd (migration 28 handled user HRs only)
|
||||
# virtual-machine-rd -> replaced by vm-disk-rd + vm-instance-rd (migration 29 handled user HRs only)
|
||||
#
|
||||
# Idempotent: safe to re-run.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
echo "=== Cleaning up orphaned -rd HelmReleases ==="
|
||||
|
||||
for hr_name in ferretdb-rd mysql-rd virtual-machine-rd; do
|
||||
if kubectl -n cozy-system get hr "$hr_name" --no-headers 2>/dev/null | grep -q .; then
|
||||
echo " [DELETE] hr/${hr_name}"
|
||||
kubectl -n cozy-system delete hr "$hr_name" --wait=false
|
||||
else
|
||||
echo " [SKIP] hr/${hr_name} already gone"
|
||||
fi
|
||||
kubectl -n cozy-system delete secret -l "owner=helm,name=${hr_name}" --ignore-not-found
|
||||
done
|
||||
|
||||
echo "=== Cleanup complete ==="
|
||||
|
||||
# Stamp version
|
||||
kubectl create configmap -n cozy-system cozystack-version \
|
||||
--from-literal=version=34 --dry-run=client -o yaml | kubectl apply -f-
|
||||
@@ -1,37 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Migration 34 --> 35
|
||||
# Backfill spec.version on rabbitmq.apps.cozystack.io resources.
|
||||
#
|
||||
# Before this migration RabbitMQ had no user-selectable version; the
|
||||
# operator always used its built-in default image (v3.x). A version field
|
||||
# was added in this release. Without this migration every existing cluster
|
||||
# would be upgraded to the new default (v4.2) on the next reconcile.
|
||||
#
|
||||
# Set spec.version to "v3.13" for any rabbitmq app resource that does not
|
||||
# already have it set.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
DEFAULT_VERSION="v3.13"
|
||||
RABBITMQS=$(kubectl get rabbitmqs.apps.cozystack.io -A -o jsonpath='{range .items[*]}{.metadata.namespace}/{.metadata.name}{"\n"}{end}')
|
||||
for resource in $RABBITMQS; do
|
||||
NS="${resource%%/*}"
|
||||
APP_NAME="${resource##*/}"
|
||||
|
||||
# Skip if spec.version is already set
|
||||
CURRENT_VER=$(kubectl get rabbitmqs.apps.cozystack.io -n "$NS" "$APP_NAME" \
|
||||
-o jsonpath='{.spec.version}')
|
||||
if [ -n "$CURRENT_VER" ]; then
|
||||
echo "SKIP $NS/$APP_NAME: spec.version already set to '$CURRENT_VER'"
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "Patching rabbitmq/$APP_NAME in $NS: setting version=$DEFAULT_VERSION"
|
||||
|
||||
kubectl patch rabbitmqs.apps.cozystack.io -n "$NS" "$APP_NAME" --type=merge \
|
||||
--patch "{\"spec\":{\"version\":\"${DEFAULT_VERSION}\"}}"
|
||||
done
|
||||
|
||||
# Stamp version
|
||||
kubectl create configmap -n cozy-system cozystack-version \
|
||||
--from-literal=version=35 --dry-run=client -o yaml | kubectl apply -f-
|
||||
@@ -24,7 +24,7 @@ if [ "$CURRENT_VERSION" -ge "$TARGET_VERSION" ]; then
|
||||
fi
|
||||
|
||||
# Run migrations sequentially from current version to target version
|
||||
for i in $(seq $CURRENT_VERSION $((TARGET_VERSION - 1))); do
|
||||
for i in $(seq $((CURRENT_VERSION + 1)) $TARGET_VERSION); do
|
||||
if [ -f "/migrations/$i" ]; then
|
||||
echo "Running migration $i"
|
||||
chmod +x /migrations/$i
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: PackageSource
|
||||
metadata:
|
||||
name: cozystack.openbao-application
|
||||
spec:
|
||||
sourceRef:
|
||||
kind: OCIRepository
|
||||
name: cozystack-packages
|
||||
namespace: cozy-system
|
||||
path: /
|
||||
variants:
|
||||
- name: default
|
||||
dependsOn:
|
||||
- cozystack.networking
|
||||
libraries:
|
||||
- name: cozy-lib
|
||||
path: library/cozy-lib
|
||||
components:
|
||||
- name: openbao-system
|
||||
path: system/openbao
|
||||
- name: openbao
|
||||
path: apps/openbao
|
||||
libraries: ["cozy-lib"]
|
||||
- name: openbao-rd
|
||||
path: system/openbao-rd
|
||||
install:
|
||||
namespace: cozy-system
|
||||
releaseName: openbao-rd
|
||||
@@ -16,7 +16,6 @@
|
||||
{{include "cozystack.platform.package.default" (list "cozystack.mariadb-application" $) }}
|
||||
{{include "cozystack.platform.package.default" (list "cozystack.mongodb-application" $) }}
|
||||
{{include "cozystack.platform.package.default" (list "cozystack.nats-application" $) }}
|
||||
{{include "cozystack.platform.package.default" (list "cozystack.openbao-application" $) }}
|
||||
{{include "cozystack.platform.package.default" (list "cozystack.postgres-application" $) }}
|
||||
{{include "cozystack.platform.package.default" (list "cozystack.qdrant-application" $) }}
|
||||
{{include "cozystack.platform.package.default" (list "cozystack.rabbitmq-application" $) }}
|
||||
|
||||
@@ -6,8 +6,6 @@ kind: ConfigMap
|
||||
metadata:
|
||||
name: cozystack-version
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
helm.sh/resource-policy: keep
|
||||
data:
|
||||
version: {{ .Values.migrations.targetVersion | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -5,8 +5,8 @@ sourceRef:
|
||||
path: /
|
||||
migrations:
|
||||
enabled: false
|
||||
image: ghcr.io/cozystack/cozystack/platform-migrations:v1.0.0@sha256:68dabdebc38ac439228ae07031cc70e0fa184a24bd4e5b3b22c17466b2a55201
|
||||
targetVersion: 35
|
||||
image: ghcr.io/cozystack/cozystack/platform-migrations:v1.0.0-beta.6@sha256:37c78dafcedbdad94acd9912550db0b4875897150666b8a06edfa894de99064e
|
||||
targetVersion: 33
|
||||
# Bundle deployment configuration
|
||||
bundles:
|
||||
system:
|
||||
@@ -46,7 +46,7 @@ publishing:
|
||||
apiServerEndpoint: "" # example: "https://api.example.org"
|
||||
externalIPs: []
|
||||
certificates:
|
||||
solver: http01 # "http01" or "dns01"
|
||||
solver: http01 # "http01" or "dns01"
|
||||
issuerName: letsencrypt-prod
|
||||
# Authentication configuration
|
||||
authentication:
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
e2e:
|
||||
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v1.0.0@sha256:0eae9f519669667d60b160ebb93c127843c470ad9ca3447fceaa54604503a7ba
|
||||
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v1.0.0-beta.6@sha256:09af5901abcbed2b612d2d93c163e8ad3948bc55a1d8beae714b4fb2b8f7d91d
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/matchbox:v1.0.0@sha256:c48eb7b23f01a8ff58d409fdb51c88e771f819cb914eee03da89471e62302f33
|
||||
ghcr.io/cozystack/cozystack/matchbox:v1.0.0-beta.6@sha256:212f624957447f5a932fd5d4564eb8c97694d336b7dc877a2833c1513c0d074d
|
||||
|
||||
@@ -104,7 +104,6 @@ spec:
|
||||
- {{ .Release.Name }}
|
||||
secretName: etcd-peer-ca-tls
|
||||
privateKey:
|
||||
rotationPolicy: Never
|
||||
algorithm: RSA
|
||||
size: 4096
|
||||
issuerRef:
|
||||
@@ -131,7 +130,6 @@ spec:
|
||||
- {{ .Release.Name }}
|
||||
secretName: etcd-ca-tls
|
||||
privateKey:
|
||||
rotationPolicy: Never
|
||||
algorithm: RSA
|
||||
size: 4096
|
||||
issuerRef:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Managed SeaweedFS Service
|
||||
# Managed NATS Service
|
||||
|
||||
## Parameters
|
||||
|
||||
@@ -13,68 +13,46 @@
|
||||
|
||||
### SeaweedFS Components Configuration
|
||||
|
||||
| Name | Description | Type | Value |
|
||||
| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------- | ------- |
|
||||
| `db` | Database configuration. | `object` | `{}` |
|
||||
| `db.replicas` | Number of database replicas. | `int` | `2` |
|
||||
| `db.size` | Persistent Volume size. | `quantity` | `10Gi` |
|
||||
| `db.storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
| `db.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `db.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `db.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `db.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `master` | Master service configuration. | `object` | `{}` |
|
||||
| `master.replicas` | Number of master replicas. | `int` | `3` |
|
||||
| `master.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `master.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `master.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `master.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `filer` | Filer service configuration. | `object` | `{}` |
|
||||
| `filer.replicas` | Number of filer replicas. | `int` | `2` |
|
||||
| `filer.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `filer.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `filer.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `filer.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `filer.grpcHost` | The hostname used to expose or access the filer service externally. | `string` | `""` |
|
||||
| `filer.grpcPort` | The port used to access the filer service externally. | `int` | `443` |
|
||||
| `filer.whitelist` | A list of IP addresses or CIDR ranges that are allowed to access the filer service. | `[]string` | `[]` |
|
||||
| `volume` | Volume service configuration. | `object` | `{}` |
|
||||
| `volume.replicas` | Number of volume replicas. | `int` | `2` |
|
||||
| `volume.size` | Persistent Volume size. | `quantity` | `10Gi` |
|
||||
| `volume.storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
| `volume.diskType` | SeaweedFS disk type tag for the default volume servers (e.g., "hdd", "ssd"). | `string` | `""` |
|
||||
| `volume.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `volume.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `volume.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `volume.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `volume.zones` | A map of zones for MultiZone topology. Each zone can have its own number of replicas and size. | `map[string]object` | `{}` |
|
||||
| `volume.zones[name].replicas` | Number of replicas in the zone. | `int` | `0` |
|
||||
| `volume.zones[name].size` | Zone storage size. | `quantity` | `""` |
|
||||
| `volume.zones[name].dataCenter` | SeaweedFS data center name for this zone. Defaults to the zone name. | `string` | `""` |
|
||||
| `volume.zones[name].nodeSelector` | YAML nodeSelector for this zone (default: topology.kubernetes.io/zone: <zoneName>). | `string` | `""` |
|
||||
| `volume.zones[name].storageClass` | StorageClass used to store zone data. Defaults to volume.storageClass. | `string` | `""` |
|
||||
| `volume.zones[name].pools` | A map of storage pools for this zone. Each pool creates a separate Volume StatefulSet per zone. | `map[string]object` | `{}` |
|
||||
| `volume.zones[name].pools[name].diskType` | SeaweedFS disk type tag (e.g., "ssd", "hdd", "nvme"). | `string` | `""` |
|
||||
| `volume.zones[name].pools[name].replicas` | Number of volume replicas. Defaults to volume.replicas (Simple) or zone.replicas/volume.replicas (MultiZone). | `int` | `0` |
|
||||
| `volume.zones[name].pools[name].size` | Persistent Volume size. Defaults to volume.size (Simple) or zone.size/volume.size (MultiZone). | `quantity` | `""` |
|
||||
| `volume.zones[name].pools[name].storageClass` | Kubernetes StorageClass for the pool. Defaults to volume.storageClass. | `string` | `""` |
|
||||
| `volume.zones[name].pools[name].resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `volume.zones[name].pools[name].resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `volume.zones[name].pools[name].resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `volume.zones[name].pools[name].resourcesPreset` | Default sizing preset used when `resources` is omitted. Defaults to volume.resourcesPreset. | `string` | `{}` |
|
||||
| `volume.pools` | A map of storage pools. Each pool creates a separate Volume StatefulSet with its own disk type. | `map[string]object` | `{}` |
|
||||
| `volume.pools[name].diskType` | SeaweedFS disk type tag (e.g., "ssd", "hdd", "nvme"). | `string` | `""` |
|
||||
| `volume.pools[name].replicas` | Number of volume replicas. Defaults to volume.replicas (Simple) or zone.replicas/volume.replicas (MultiZone). | `int` | `0` |
|
||||
| `volume.pools[name].size` | Persistent Volume size. Defaults to volume.size (Simple) or zone.size/volume.size (MultiZone). | `quantity` | `""` |
|
||||
| `volume.pools[name].storageClass` | Kubernetes StorageClass for the pool. Defaults to volume.storageClass. | `string` | `""` |
|
||||
| `volume.pools[name].resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `volume.pools[name].resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `volume.pools[name].resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `volume.pools[name].resourcesPreset` | Default sizing preset used when `resources` is omitted. Defaults to volume.resourcesPreset. | `string` | `{}` |
|
||||
| `s3` | S3 service configuration. | `object` | `{}` |
|
||||
| `s3.replicas` | Number of S3 replicas. | `int` | `2` |
|
||||
| `s3.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `s3.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `s3.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `s3.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| Name | Description | Type | Value |
|
||||
| ----------------------------- | -------------------------------------------------------------------------------------------------------- | ------------------- | ------- |
|
||||
| `db` | Database configuration. | `object` | `{}` |
|
||||
| `db.replicas` | Number of database replicas. | `int` | `2` |
|
||||
| `db.size` | Persistent Volume size. | `quantity` | `10Gi` |
|
||||
| `db.storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
| `db.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `db.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `db.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `db.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `master` | Master service configuration. | `object` | `{}` |
|
||||
| `master.replicas` | Number of master replicas. | `int` | `3` |
|
||||
| `master.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `master.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `master.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `master.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `filer` | Filer service configuration. | `object` | `{}` |
|
||||
| `filer.replicas` | Number of filer replicas. | `int` | `2` |
|
||||
| `filer.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `filer.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `filer.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `filer.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `filer.grpcHost` | The hostname used to expose or access the filer service externally. | `string` | `""` |
|
||||
| `filer.grpcPort` | The port used to access the filer service externally. | `int` | `443` |
|
||||
| `filer.whitelist` | A list of IP addresses or CIDR ranges that are allowed to access the filer service. | `[]string` | `[]` |
|
||||
| `volume` | Volume service configuration. | `object` | `{}` |
|
||||
| `volume.replicas` | Number of volume replicas. | `int` | `2` |
|
||||
| `volume.size` | Persistent Volume size. | `quantity` | `10Gi` |
|
||||
| `volume.storageClass` | StorageClass used to store the data. | `string` | `""` |
|
||||
| `volume.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `volume.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `volume.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `volume.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
| `volume.zones` | A map of zones for MultiZone topology. Each zone can have its own number of replicas and size. | `map[string]object` | `{}` |
|
||||
| `volume.zones[name].replicas` | Number of replicas in the zone. | `int` | `0` |
|
||||
| `volume.zones[name].size` | Zone storage size. | `quantity` | `""` |
|
||||
| `s3` | S3 service configuration. | `object` | `{}` |
|
||||
| `s3.replicas` | Number of S3 replicas. | `int` | `2` |
|
||||
| `s3.resources` | Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied. | `object` | `{}` |
|
||||
| `s3.resources.cpu` | Number of CPU cores allocated. | `quantity` | `""` |
|
||||
| `s3.resources.memory` | Amount of memory allocated. | `quantity` | `""` |
|
||||
| `s3.resourcesPreset` | Default sizing preset used when `resources` is omitted. | `string` | `small` |
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/objectstorage-sidecar:v1.0.0@sha256:2a3595cd88b30af55b2000d3ca204899beecef0012b0e0402754c3914aad1f7f
|
||||
ghcr.io/cozystack/cozystack/objectstorage-sidecar:v1.0.0-beta.6@sha256:235b194a531b70e266a10ef78d2955d19f5b659513f23d8b3cfbbc0dff7fc1c0
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/seaweedfs/seaweedfs-cosi-driver:v0.3.0
|
||||
ghcr.io/seaweedfs/seaweedfs-cosi-driver:v0.2.0
|
||||
|
||||
@@ -25,21 +25,8 @@ rules:
|
||||
resourceNames:
|
||||
- {{ $.Release.Name }}-master
|
||||
- {{ $.Release.Name }}-filer
|
||||
- {{ $.Release.Name }}-db
|
||||
- {{ $.Release.Name }}-s3
|
||||
{{- if eq .Values.topology "Simple" }}
|
||||
- {{ $.Release.Name }}-volume
|
||||
{{- range $poolName, $pool := .Values.volume.pools }}
|
||||
- {{ $.Release.Name }}-volume-{{ $poolName }}
|
||||
{{- end }}
|
||||
{{- else if eq .Values.topology "MultiZone" }}
|
||||
{{- range $zoneName, $zone := .Values.volume.zones }}
|
||||
- {{ $.Release.Name }}-volume-{{ $zoneName }}
|
||||
{{- range $poolName, $pool := (dig "pools" dict $zone) }}
|
||||
- {{ $.Release.Name }}-volume-{{ $zoneName }}-{{ $poolName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- {{ $.Release.Name }}-db
|
||||
verbs: ["get", "list", "watch"]
|
||||
{{- end }}
|
||||
|
||||
|
||||
@@ -16,65 +16,6 @@
|
||||
{{- fail "replicationFactor must be less than or equal to the number of zones defined in .Values.volume.zones." }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if and (eq .Values.topology "Client") (gt (len .Values.volume.pools) 0) }}
|
||||
{{- fail "volume.pools is not supported with Client topology." }}
|
||||
{{- end }}
|
||||
{{- if and (eq .Values.topology "MultiZone") (gt (len .Values.volume.pools) 0) }}
|
||||
{{- fail "volume.pools is not supported with MultiZone topology. Use volume.zones[name].pools instead." }}
|
||||
{{- end }}
|
||||
{{- if and .Values.volume.diskType (not (regexMatch "^[a-z0-9]+$" .Values.volume.diskType)) }}
|
||||
{{- fail (printf "volume.diskType must be lowercase alphanumeric (got: %s)." .Values.volume.diskType) }}
|
||||
{{- end }}
|
||||
|
||||
{{- /* Collect and validate all pools from volume.pools and zones[].pools */ -}}
|
||||
{{- $allPools := dict }}
|
||||
{{- range $poolName, $pool := .Values.volume.pools }}
|
||||
{{- if not (regexMatch "^[a-z0-9]([a-z0-9-]*[a-z0-9])?$" $poolName) }}
|
||||
{{- fail (printf "volume.pools key '%s' must be a valid DNS label (lowercase alphanumeric and hyphens, no dots)." $poolName) }}
|
||||
{{- end }}
|
||||
{{- if or (hasSuffix "-worm" $poolName) (hasSuffix "-readonly" $poolName) }}
|
||||
{{- fail (printf "volume.pools key '%s' must not end with '-worm' or '-readonly' (reserved suffixes for COSI resources)." $poolName) }}
|
||||
{{- end }}
|
||||
{{- if not $pool.diskType }}
|
||||
{{- fail (printf "volume.pools.%s.diskType is required." $poolName) }}
|
||||
{{- end }}
|
||||
{{- if not (regexMatch "^[a-z0-9]+$" $pool.diskType) }}
|
||||
{{- fail (printf "volume.pools.%s.diskType must be lowercase alphanumeric (got: %s)." $poolName $pool.diskType) }}
|
||||
{{- end }}
|
||||
{{- if and $.Values.volume.diskType (eq $pool.diskType $.Values.volume.diskType) }}
|
||||
{{- fail (printf "volume.pools.%s.diskType '%s' must differ from volume.diskType." $poolName $pool.diskType) }}
|
||||
{{- end }}
|
||||
{{- $_ := set $allPools $poolName $pool.diskType }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.topology "MultiZone" }}
|
||||
{{- range $zoneName, $zone := .Values.volume.zones }}
|
||||
{{- range $poolName, $pool := (dig "pools" dict $zone) }}
|
||||
{{- if not (regexMatch "^[a-z0-9]([a-z0-9-]*[a-z0-9])?$" $poolName) }}
|
||||
{{- fail (printf "volume.zones.%s.pools key '%s' must be a valid DNS label." $zoneName $poolName) }}
|
||||
{{- end }}
|
||||
{{- if or (hasSuffix "-worm" $poolName) (hasSuffix "-readonly" $poolName) }}
|
||||
{{- fail (printf "volume.zones.%s.pools key '%s' must not end with '-worm' or '-readonly' (reserved suffixes for COSI resources)." $zoneName $poolName) }}
|
||||
{{- end }}
|
||||
{{- if not $pool.diskType }}
|
||||
{{- fail (printf "volume.zones.%s.pools.%s.diskType is required." $zoneName $poolName) }}
|
||||
{{- end }}
|
||||
{{- if not (regexMatch "^[a-z0-9]+$" $pool.diskType) }}
|
||||
{{- fail (printf "volume.zones.%s.pools.%s.diskType must be lowercase alphanumeric (got: %s)." $zoneName $poolName $pool.diskType) }}
|
||||
{{- end }}
|
||||
{{- if and $.Values.volume.diskType (eq $pool.diskType $.Values.volume.diskType) }}
|
||||
{{- fail (printf "volume.zones.%s.pools.%s.diskType '%s' must differ from volume.diskType." $zoneName $poolName $pool.diskType) }}
|
||||
{{- end }}
|
||||
{{- if and (hasKey $allPools $poolName) (ne (get $allPools $poolName) $pool.diskType) }}
|
||||
{{- fail (printf "Pool '%s' has inconsistent diskType across zones (expected '%s', got '%s' in zone '%s')." $poolName (get $allPools $poolName) $pool.diskType $zoneName) }}
|
||||
{{- end }}
|
||||
{{- $_ := set $allPools $poolName $pool.diskType }}
|
||||
{{- $composedName := printf "%s-%s" $zoneName $poolName }}
|
||||
{{- if hasKey $.Values.volume.zones $composedName }}
|
||||
{{- fail (printf "Composed volume name '%s' (from zone '%s' and pool '%s') collides with an existing zone name." $composedName $zoneName $poolName) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- $detectedTopology := "Unknown" }}
|
||||
{{- $configMap := lookup "v1" "ConfigMap" .Release.Namespace (printf "%s-deployed-topology" .Release.Name) }}
|
||||
@@ -153,77 +94,30 @@ spec:
|
||||
storageClass: {{ . }}
|
||||
{{- end }}
|
||||
maxVolumes: 0
|
||||
{{- if .Values.volume.diskType }}
|
||||
extraArgs:
|
||||
- "-disk={{ .Values.volume.diskType }}"
|
||||
{{- end }}
|
||||
{{- if or (and (eq .Values.topology "Simple") (gt (len .Values.volume.pools) 0)) (eq .Values.topology "MultiZone") }}
|
||||
{{ if eq .Values.topology "MultiZone" }}
|
||||
volumes:
|
||||
{{- if eq .Values.topology "Simple" }}
|
||||
{{- range $poolName, $pool := .Values.volume.pools }}
|
||||
{{ $poolName }}:
|
||||
replicas: {{ ternary $pool.replicas $.Values.volume.replicas (hasKey $pool "replicas") }}
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list ($pool.resourcesPreset | default $.Values.volume.resourcesPreset) (default dict $pool.resources) $) | nindent 12 }}
|
||||
dataDirs:
|
||||
- name: data1
|
||||
type: "persistentVolumeClaim"
|
||||
size: "{{ $pool.size | default $.Values.volume.size }}"
|
||||
{{- with ($pool.storageClass | default $.Values.volume.storageClass) }}
|
||||
storageClass: "{{ . }}"
|
||||
{{- end }}
|
||||
maxVolumes: 0
|
||||
extraArgs:
|
||||
- "-disk={{ $pool.diskType }}"
|
||||
{{- end }}
|
||||
{{- else if eq .Values.topology "MultiZone" }}
|
||||
{{- range $zoneName, $zone := .Values.volume.zones }}
|
||||
{{ $zoneName }}:
|
||||
replicas: {{ ternary $zone.replicas $.Values.volume.replicas (hasKey $zone "replicas") }}
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list $.Values.volume.resourcesPreset $.Values.volume.resources $) | nindent 12 }}
|
||||
dataDirs:
|
||||
- name: data1
|
||||
type: "persistentVolumeClaim"
|
||||
size: "{{ $zone.size | default $.Values.volume.size }}"
|
||||
{{- with ($zone.storageClass | default $.Values.volume.storageClass) }}
|
||||
storageClass: "{{ . }}"
|
||||
{{- end }}
|
||||
maxVolumes: 0
|
||||
nodeSelector: |
|
||||
{{- with $zone.nodeSelector }}
|
||||
{{ . | indent 12 }}
|
||||
{{- else }}
|
||||
topology.kubernetes.io/zone: {{ $zoneName }}
|
||||
{{- end }}
|
||||
dataCenter: {{ $zone.dataCenter | default $zoneName }}
|
||||
{{- if $.Values.volume.diskType }}
|
||||
extraArgs:
|
||||
- "-disk={{ $.Values.volume.diskType }}"
|
||||
{{ with $zone.replicas }}
|
||||
replicas: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $zoneName, $zone := .Values.volume.zones }}
|
||||
{{- range $poolName, $pool := (dig "pools" dict $zone) }}
|
||||
{{ $zoneName }}-{{ $poolName }}:
|
||||
replicas: {{ ternary $pool.replicas (ternary $zone.replicas $.Values.volume.replicas (hasKey $zone "replicas")) (hasKey $pool "replicas") }}
|
||||
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list ($pool.resourcesPreset | default $.Values.volume.resourcesPreset) (default dict $pool.resources) $) | nindent 12 }}
|
||||
dataDirs:
|
||||
- name: data1
|
||||
type: "persistentVolumeClaim"
|
||||
size: "{{ $pool.size | default $zone.size | default $.Values.volume.size }}"
|
||||
{{- with ($pool.storageClass | default $zone.storageClass | default $.Values.volume.storageClass) }}
|
||||
storageClass: "{{ . }}"
|
||||
{{- if $zone.size }}
|
||||
size: "{{ $zone.size }}"
|
||||
{{- else }}
|
||||
size: "{{ $.Values.volume.size }}"
|
||||
{{- end }}
|
||||
{{- if $zone.storageClass }}
|
||||
storageClass: {{ $zone.storageClass }}
|
||||
{{- else if $.Values.volume.storageClass }}
|
||||
storageClass: {{ $.Values.volume.storageClass }}
|
||||
{{- end }}
|
||||
maxVolumes: 0
|
||||
nodeSelector: |
|
||||
{{- with $zone.nodeSelector }}
|
||||
{{ . | indent 12 }}
|
||||
{{- else }}
|
||||
topology.kubernetes.io/zone: {{ $zoneName }}
|
||||
{{- end }}
|
||||
dataCenter: {{ $zone.dataCenter | default $zoneName }}
|
||||
extraArgs:
|
||||
- "-disk={{ $pool.diskType }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
filer:
|
||||
@@ -305,22 +199,6 @@ spec:
|
||||
app.kubernetes.io/component: volume
|
||||
app.kubernetes.io/name: seaweedfs
|
||||
version: {{ $.Chart.Version }}
|
||||
{{- range $poolName, $pool := .Values.volume.pools }}
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-volume-{{ $poolName }}
|
||||
spec:
|
||||
replicas: {{ ternary $pool.replicas $.Values.volume.replicas (hasKey $pool "replicas") }}
|
||||
minReplicas: 1
|
||||
kind: seaweedfs
|
||||
type: volume
|
||||
selector:
|
||||
app.kubernetes.io/component: volume-{{ $poolName }}
|
||||
app.kubernetes.io/name: seaweedfs
|
||||
version: {{ $.Chart.Version }}
|
||||
{{- end }}
|
||||
{{- else if eq .Values.topology "MultiZone" }}
|
||||
{{- range $zoneName, $zoneSpec := .Values.volume.zones }}
|
||||
---
|
||||
@@ -329,7 +207,7 @@ kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-volume-{{ $zoneName }}
|
||||
spec:
|
||||
replicas: {{ ternary $zoneSpec.replicas $.Values.volume.replicas (hasKey $zoneSpec "replicas") }}
|
||||
replicas: {{ default $.Values.volume.replicas $zoneSpec.replicas }}
|
||||
minReplicas: 1
|
||||
kind: seaweedfs
|
||||
type: volume
|
||||
@@ -337,22 +215,6 @@ spec:
|
||||
app.kubernetes.io/component: volume-{{ $zoneName }}
|
||||
app.kubernetes.io/name: seaweedfs
|
||||
version: {{ $.Chart.Version }}
|
||||
{{- range $poolName, $pool := (dig "pools" dict $zoneSpec) }}
|
||||
---
|
||||
apiVersion: cozystack.io/v1alpha1
|
||||
kind: WorkloadMonitor
|
||||
metadata:
|
||||
name: {{ $.Release.Name }}-volume-{{ $zoneName }}-{{ $poolName }}
|
||||
spec:
|
||||
replicas: {{ ternary $pool.replicas (ternary $zoneSpec.replicas $.Values.volume.replicas (hasKey $zoneSpec "replicas")) (hasKey $pool "replicas") }}
|
||||
minReplicas: 1
|
||||
kind: seaweedfs
|
||||
type: volume
|
||||
selector:
|
||||
app.kubernetes.io/component: volume-{{ $zoneName }}-{{ $poolName }}
|
||||
app.kubernetes.io/name: seaweedfs
|
||||
version: {{ $.Chart.Version }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
---
|
||||
|
||||
@@ -1,55 +0,0 @@
|
||||
{{- if ne .Values.topology "Client" }}
|
||||
{{- /* Collect unique pools from volume.pools and zones[].pools */ -}}
|
||||
{{- $uniquePools := dict }}
|
||||
{{- range $poolName, $pool := .Values.volume.pools }}
|
||||
{{- $_ := set $uniquePools $poolName $pool.diskType }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.topology "MultiZone" }}
|
||||
{{- range $zoneName, $zone := .Values.volume.zones }}
|
||||
{{- range $poolName, $pool := (dig "pools" dict $zone) }}
|
||||
{{- $_ := set $uniquePools $poolName $pool.diskType }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $poolName, $diskType := $uniquePools }}
|
||||
---
|
||||
kind: BucketClass
|
||||
apiVersion: objectstorage.k8s.io/v1alpha1
|
||||
metadata:
|
||||
name: {{ $.Release.Namespace }}-{{ $poolName }}
|
||||
driverName: {{ $.Release.Namespace }}.seaweedfs.objectstorage.k8s.io
|
||||
deletionPolicy: Delete
|
||||
parameters:
|
||||
disk: {{ $diskType }}
|
||||
---
|
||||
kind: BucketClass
|
||||
apiVersion: objectstorage.k8s.io/v1alpha1
|
||||
metadata:
|
||||
name: {{ $.Release.Namespace }}-{{ $poolName }}-worm
|
||||
driverName: {{ $.Release.Namespace }}.seaweedfs.objectstorage.k8s.io
|
||||
deletionPolicy: Retain
|
||||
parameters:
|
||||
disk: {{ $diskType }}
|
||||
objectLockEnabled: "true"
|
||||
objectLockRetentionMode: COMPLIANCE
|
||||
objectLockRetentionDays: "36500"
|
||||
---
|
||||
kind: BucketAccessClass
|
||||
apiVersion: objectstorage.k8s.io/v1alpha1
|
||||
metadata:
|
||||
name: {{ $.Release.Namespace }}-{{ $poolName }}
|
||||
driverName: {{ $.Release.Namespace }}.seaweedfs.objectstorage.k8s.io
|
||||
authenticationType: KEY
|
||||
parameters:
|
||||
accessPolicy: readwrite
|
||||
---
|
||||
kind: BucketAccessClass
|
||||
apiVersion: objectstorage.k8s.io/v1alpha1
|
||||
metadata:
|
||||
name: {{ $.Release.Namespace }}-{{ $poolName }}-readonly
|
||||
driverName: {{ $.Release.Namespace }}.seaweedfs.objectstorage.k8s.io
|
||||
authenticationType: KEY
|
||||
parameters:
|
||||
accessPolicy: readonly
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -300,94 +300,6 @@
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"properties": {
|
||||
"diskType": {
|
||||
"description": "SeaweedFS disk type tag for the default volume servers (e.g., \"hdd\", \"ssd\").",
|
||||
"type": "string",
|
||||
"default": ""
|
||||
},
|
||||
"pools": {
|
||||
"description": "A map of storage pools. Each pool creates a separate Volume StatefulSet with its own disk type.",
|
||||
"type": "object",
|
||||
"default": {},
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"diskType"
|
||||
],
|
||||
"properties": {
|
||||
"diskType": {
|
||||
"description": "SeaweedFS disk type tag (e.g., \"ssd\", \"hdd\", \"nvme\").",
|
||||
"type": "string"
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Number of volume replicas. Defaults to volume.replicas (Simple) or zone.replicas/volume.replicas (MultiZone).",
|
||||
"type": "integer"
|
||||
},
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "Number of CPU cores allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Amount of memory allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted. Defaults to volume.resourcesPreset.",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"size": {
|
||||
"description": "Persistent Volume size. Defaults to volume.size (Simple) or zone.size/volume.size (MultiZone).",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"storageClass": {
|
||||
"description": "Kubernetes StorageClass for the pool. Defaults to volume.storageClass.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Number of volume replicas.",
|
||||
"type": "integer",
|
||||
@@ -466,96 +378,6 @@
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"dataCenter": {
|
||||
"description": "SeaweedFS data center name for this zone. Defaults to the zone name.",
|
||||
"type": "string"
|
||||
},
|
||||
"nodeSelector": {
|
||||
"description": "YAML nodeSelector for this zone (default: topology.kubernetes.io/zone: \u003czoneName\u003e).",
|
||||
"type": "string"
|
||||
},
|
||||
"pools": {
|
||||
"description": "A map of storage pools for this zone. Each pool creates a separate Volume StatefulSet per zone.",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"diskType"
|
||||
],
|
||||
"properties": {
|
||||
"diskType": {
|
||||
"description": "SeaweedFS disk type tag (e.g., \"ssd\", \"hdd\", \"nvme\").",
|
||||
"type": "string"
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Number of volume replicas. Defaults to volume.replicas (Simple) or zone.replicas/volume.replicas (MultiZone).",
|
||||
"type": "integer"
|
||||
},
|
||||
"resources": {
|
||||
"description": "Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"cpu": {
|
||||
"description": "Number of CPU cores allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"memory": {
|
||||
"description": "Amount of memory allocated.",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"resourcesPreset": {
|
||||
"description": "Default sizing preset used when `resources` is omitted. Defaults to volume.resourcesPreset.",
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"nano",
|
||||
"micro",
|
||||
"small",
|
||||
"medium",
|
||||
"large",
|
||||
"xlarge",
|
||||
"2xlarge"
|
||||
]
|
||||
},
|
||||
"size": {
|
||||
"description": "Persistent Volume size. Defaults to volume.size (Simple) or zone.size/volume.size (MultiZone).",
|
||||
"pattern": "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$",
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"storageClass": {
|
||||
"description": "Kubernetes StorageClass for the pool. Defaults to volume.storageClass.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"replicas": {
|
||||
"description": "Number of replicas in the zone.",
|
||||
"type": "integer"
|
||||
@@ -572,10 +394,6 @@
|
||||
}
|
||||
],
|
||||
"x-kubernetes-int-or-string": true
|
||||
},
|
||||
"storageClass": {
|
||||
"description": "StorageClass used to store zone data. Defaults to volume.storageClass.",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,49 +76,26 @@ filer:
|
||||
grpcPort: 443
|
||||
whitelist: []
|
||||
|
||||
## @typedef {struct} StoragePool - Storage pool configuration for separating buckets by disk type.
|
||||
## @field {string} diskType - SeaweedFS disk type tag (e.g., "ssd", "hdd", "nvme").
|
||||
## @field {int} [replicas] - Number of volume replicas. Defaults to volume.replicas (Simple) or zone.replicas/volume.replicas (MultiZone).
|
||||
## @field {quantity} [size] - Persistent Volume size. Defaults to volume.size (Simple) or zone.size/volume.size (MultiZone).
|
||||
## @field {string} [storageClass] - Kubernetes StorageClass for the pool. Defaults to volume.storageClass.
|
||||
## @field {Resources} [resources] - Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
## @field {ResourcesPreset} [resourcesPreset] - Default sizing preset used when `resources` is omitted. Defaults to volume.resourcesPreset.
|
||||
|
||||
## @typedef {struct} Zone - Zone configuration.
|
||||
## @field {int} [replicas] - Number of replicas in the zone.
|
||||
## @field {quantity} [size] - Zone storage size.
|
||||
## @field {string} [dataCenter] - SeaweedFS data center name for this zone. Defaults to the zone name.
|
||||
## @field {string} [nodeSelector] - YAML nodeSelector for this zone (default: topology.kubernetes.io/zone: <zoneName>).
|
||||
## @field {string} [storageClass] - StorageClass used to store zone data. Defaults to volume.storageClass.
|
||||
## @field {map[string]StoragePool} [pools] - A map of storage pools for this zone. Each pool creates a separate Volume StatefulSet per zone.
|
||||
## NOTE: Zone-level resources/resourcesPreset are inherited from volume.* settings. Pools within a zone can define their own resources.
|
||||
|
||||
## @typedef {struct} Volume - Volume service configuration.
|
||||
## @field {int} [replicas] - Number of volume replicas.
|
||||
## @field {quantity} [size] - Persistent Volume size.
|
||||
## @field {string} [storageClass] - StorageClass used to store the data.
|
||||
## @field {string} [diskType] - SeaweedFS disk type tag for the default volume servers (e.g., "hdd", "ssd").
|
||||
## @field {Resources} [resources] - Explicit CPU and memory configuration. When omitted, the preset defined in `resourcesPreset` is applied.
|
||||
## @field {ResourcesPreset} [resourcesPreset] - Default sizing preset used when `resources` is omitted.
|
||||
## @field {map[string]Zone} [zones] - A map of zones for MultiZone topology. Each zone can have its own number of replicas and size.
|
||||
## @field {map[string]StoragePool} [pools] - A map of storage pools. Each pool creates a separate Volume StatefulSet with its own disk type.
|
||||
|
||||
## @param {Volume} [volume] - Volume service configuration.
|
||||
volume:
|
||||
replicas: 2
|
||||
size: 10Gi
|
||||
storageClass: ""
|
||||
diskType: ""
|
||||
resources: {}
|
||||
resourcesPreset: "small"
|
||||
zones: {}
|
||||
pools: {}
|
||||
#pools:
|
||||
# fast:
|
||||
# diskType: ssd
|
||||
# replicas: 2
|
||||
# size: 50Gi
|
||||
# storageClass: "local-nvme"
|
||||
|
||||
## @typedef {struct} S3 - S3 service configuration.
|
||||
## @field {int} [replicas] - Number of S3 replicas.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
backupController:
|
||||
image: "ghcr.io/cozystack/cozystack/backup-controller:v1.0.0@sha256:e1a6c8ac7ba64442812464b59c53e782e373a339c18b379c2692921b44c6edb5"
|
||||
image: "ghcr.io/cozystack/cozystack/backup-controller:v1.0.0-beta.6@sha256:365214a74ffc34a9314a62a7d4b491590051fc5486f6bae9913c0c1289983d43"
|
||||
replicas: 2
|
||||
debug: false
|
||||
metrics:
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
backupStrategyController:
|
||||
image: "ghcr.io/cozystack/cozystack/backupstrategy-controller:v1.0.0@sha256:29735d945c69c6bbaab21068bf4ea30f6b63f4c71a7a8d95590f370abcb4b328"
|
||||
image: "ghcr.io/cozystack/cozystack/backupstrategy-controller:v1.0.0-beta.6@sha256:aa04ee61dce11950162606fc8db2d5cbc6f5b32ba700f790b3f1eee10d65efb1"
|
||||
replicas: 2
|
||||
debug: false
|
||||
metrics:
|
||||
|
||||
@@ -33,7 +33,6 @@ spec:
|
||||
- resourceNames:
|
||||
- bucket-{{ .name }}
|
||||
- bucket-{{ .name }}-credentials
|
||||
- bucket-{{ .name }}-readonly
|
||||
ingresses:
|
||||
exclude: []
|
||||
include:
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:279008f87460d709e99ed25ee8a1e4568a290bb9afa0e3dd3a06d524163a132b
|
||||
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:291427de7db54a1d19dc9c2c807bdcc664a14caa9538786f31317e8c01a4a008
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
export NAME=cert-manager-crds
|
||||
export NAMESPACE=cozy-cert-manager
|
||||
|
||||
include ../../../hack/package.mk
|
||||
|
||||
update:
|
||||
rm -rf charts
|
||||
helm repo add jetstack https://charts.jetstack.io
|
||||
helm repo update jetstack
|
||||
helm pull jetstack/cert-manager --untar --untardir charts
|
||||
rm -f -- `find charts/cert-manager/templates -maxdepth 1 -mindepth 1 | grep -v 'crds.yaml\|_helpers.tpl'`
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
annotations:
|
||||
artifacthub.io/category: security
|
||||
artifacthub.io/license: Apache-2.0
|
||||
artifacthub.io/prerelease: "false"
|
||||
artifacthub.io/signKey: |
|
||||
fingerprint: 1020CF3C033D4F35BAE1C19E1226061C665DF13E
|
||||
url: https://cert-manager.io/public-keys/cert-manager-keyring-2021-09-20-1020CF3C033D4F35BAE1C19E1226061C665DF13E.gpg
|
||||
apiVersion: v2
|
||||
appVersion: v1.16.3
|
||||
description: A Helm chart for cert-manager
|
||||
home: https://cert-manager.io
|
||||
icon: https://raw.githubusercontent.com/cert-manager/community/4d35a69437d21b76322157e6284be4cd64e6d2b7/logo/logo-small.png
|
||||
keywords:
|
||||
- cert-manager
|
||||
- kube-lego
|
||||
- letsencrypt
|
||||
- tls
|
||||
kubeVersion: '>= 1.22.0-0'
|
||||
maintainers:
|
||||
- email: cert-manager-maintainers@googlegroups.com
|
||||
name: cert-manager-maintainers
|
||||
url: https://cert-manager.io
|
||||
name: cert-manager
|
||||
sources:
|
||||
- https://github.com/cert-manager/cert-manager
|
||||
version: v1.16.3
|
||||
1994
packages/system/cert-manager-crds/charts/cert-manager/README.md
Normal file
1994
packages/system/cert-manager-crds/charts/cert-manager/README.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -187,17 +187,6 @@ See https://github.com/cert-manager/cert-manager/issues/6329 for a list of linke
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Labels for the CRD resources.
|
||||
*/}}
|
||||
{{- define "cert-manager.crd-labels" -}}
|
||||
app: "{{ template "cert-manager.name" . }}"
|
||||
app.kubernetes.io/name: "{{ template "cert-manager.name" . }}"
|
||||
app.kubernetes.io/instance: "{{ .Release.Name }}"
|
||||
app.kubernetes.io/component: "crds"
|
||||
{{ include "labels" . }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Check that the user has not set both .installCRDs and .crds.enabled or
|
||||
set .installCRDs and disabled .crds.keep.
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user