Compare commits

...

36 Commits

Author SHA1 Message Date
Andrei Kvapil
4568432d74 Release v0.38.8 (#1839)
This PR prepares the release `v0.38.8`.
2026-01-09 08:23:15 +01:00
cozystack-bot
820024ff97 Prepare release v0.38.8
Signed-off-by: cozystack-bot <217169706+cozystack-bot@users.noreply.github.com>
2026-01-09 01:33:55 +00:00
Andrei Kvapil
9bef5a8840 [multus] Remove memory limit (#1834)
Removes multus memory limit due to short but unpredictable and large
memory consumption in some cases, such as starting up after a node
reboot (reported up to 3Gi).
The root cause will be fixed in future releases.

```release-note
Multus memory limit removed.
```

<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->

* **Chores**
* Updated Multus system namespace configuration and DaemonSet naming for
improved environment organization.
* Adjusted container resource allocation: increased memory requests and
refined memory limits for optimized performance.
* Updated deployment template specifications to reflect infrastructure
changes.

<sub>✏️ Tip: You can customize this high-level summary in your review
settings.</sub>

<!-- end of auto-generated comment: release notes by coderabbit.ai -->
2026-01-08 23:23:15 +01:00
Andrei Kvapil
63ff4dbc5c Release v0.38.7 (#1820)
This PR prepares the release `v0.38.7`.
2026-01-07 21:17:26 +01:00
cozystack-bot
6cbe6d9fd1 Prepare release v0.38.7
Signed-off-by: cozystack-bot <217169706+cozystack-bot@users.noreply.github.com>
2026-01-07 14:45:37 +00:00
Andrei Kvapil
5495fde0e3 [Backport release-0.38] [kubevirt-operator] Revert incorrect case change in VM alerts (#1805)
# Description
Backport of #1804 to `release-0.38`.
2026-01-05 16:29:22 +01:00
Aleksei Sviridkin
c7cb1ba97c [kubevirt-operator] Revert incorrect case change in VM alerts
Revert PR #1770 which incorrectly changed status check from lowercase
to uppercase. The actual metrics use lowercase:
- kubevirt_vm_info uses status="running" (not "Running")
- kubevirt_vmi_info uses phase="running" (not "Running")

Verified by querying virt-controller metrics in the instories cluster.

Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Aleksei Sviridkin <f@lex.la>
(cherry picked from commit 36836fd84e)
2026-01-05 15:19:57 +00:00
Andrei Kvapil
4dfb3086a1 Release v0.38.6 (#1799)
This PR prepares the release `v0.38.6`.
2026-01-04 09:25:03 +01:00
cozystack-bot
3dbd069e18 Prepare release v0.38.6
Signed-off-by: cozystack-bot <217169706+cozystack-bot@users.noreply.github.com>
2026-01-04 01:35:38 +00:00
Andrei Kvapil
4e3c5f4759 [Backport release-0.38] [kubernetes] Add lb tests for tenant k8s (#1792)
# Description
Backport of #1783 to `release-0.38`.
2026-01-03 08:31:17 +01:00
Andrei Kvapil
a435db3bb2 fix(e2e): correct Service selector to match Deployment labels
The Service selector was using app: "${test_name}-backend" but the
Deployment pod template has app: backend. Fixed selector to match
the actual pod labels so endpoints are created correctly.

Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit 3a5977ff60)
2026-01-03 07:30:45 +00:00
Andrei Kvapil
e6d8dc2446 fix(e2e): run LB check curl from testing environment
Run curl directly from the testing container instead of creating
a separate pod with kubectl run. This avoids PodSecurity policy
violations and simplifies the test execution.

Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit dd0bbd375f)
2026-01-03 07:30:45 +00:00
IvanHunters
2857a6e725 add lb tests for tenant k8s
Signed-off-by: IvanHunters <xorokhotnikov@gmail.com>
(cherry picked from commit 5638a7eae9)
2026-01-03 07:30:44 +00:00
Andrei Kvapil
e04e21ebab Release v0.38.5 (#1790)
This PR prepares the release `v0.38.5`.
2026-01-03 08:21:33 +01:00
cozystack-bot
106f4b8edc Prepare release v0.38.5
Signed-off-by: cozystack-bot <217169706+cozystack-bot@users.noreply.github.com>
2026-01-03 01:38:44 +00:00
Nikita
26da347de8 [Backport release-0.38] [cilium] Enable automatic pod rollout on configmap updates (#1745)
# Description
Backport of #1728 to `release-0.38`.
2025-12-23 15:41:56 +03:00
Nikita
dd2798dbda [Backport release-0.38] [virtual-machine,vm-instance] Add nodeAffinity for Windows VMs based on scheduling config (#1744)
# Description
Backport of #1693 to `release-0.38`.
2025-12-23 15:41:33 +03:00
Andrei Kvapil
fb5b4da2b2 [cilium] Enable automatic pod rollout on configmap updates
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit 97e8d2aa49)
2025-12-23 12:41:25 +00:00
Andrei Kvapil
540e1c6e0d [virtual-machine,vm-instance] Add nodeAffinity for Windows VMs based on scheduling config
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit 3975da93c6)
2025-12-23 12:39:33 +00:00
Nikita
1dd8e00e17 [Backport release-0.38] Update SeaweedFS v4.02 (#1732)
# Description
Backport of #1725 to `release-0.38`.
2025-12-18 13:40:31 +03:00
Andrei Kvapil
79165ca2f9 Update SeaweedFS v4.02
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit 1805be3c48)
2025-12-18 10:39:52 +00:00
Andrei Kvapil
a8bd179f0d [Backport release-0.38] [apps] Refactor apiserver to use typed objects and fix UnstructuredList GVK (#1709)
# Description
Backport of #1679 to `release-0.38`.
2025-12-10 19:28:16 +01:00
Andrei Kvapil
1491535b35 [apps] Refactor apiserver to use typed objects and fix UnstructuredList GVK
This commit refactors the apiserver REST handlers to use typed objects
(appsv1alpha1.Application) instead of unstructured.Unstructured, eliminating
the need for runtime conversions and simplifying the codebase.

Additionally, it fixes an issue where UnstructuredList objects were using
the first registered kind from typeToGVK instead of the kind from the
object's field when multiple kinds are registered with the same Go type.

This is a more comprehensive fix for the problem addressed in
https://github.com/cozystack/cozystack/pull/1630, which was reverted in
https://github.com/cozystack/cozystack/pull/1677.

The fix includes the upstream fix from kubernetes/kubernetes#135537,
which enables short-circuit path for UnstructuredList similar to regular
Unstructured objects, using GVK from the object field instead of
typeToGVK.

Changes:
- Refactored rest.go handlers to use typed Application objects
- Removed unstructured.Unstructured conversions
- Fixed UnstructuredList GVK handling
- Updated dependencies in go.mod/go.sum
- Added e2e test for OpenAPI validation
- Updated Dockerfile

Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit ca29fc855a)
2025-12-10 12:22:57 +00:00
Andrei Kvapil
290c6be04b Release v0.38.4 (#1704)
This PR prepares the release `v0.38.4`.
2025-12-09 19:54:27 +01:00
cozystack-bot
03328dc4e4 Prepare release v0.38.4
Signed-off-by: cozystack-bot <217169706+cozystack-bot@users.noreply.github.com>
2025-12-09 16:23:14 +00:00
Andrei Kvapil
9311a9e547 [Backport release-0.38] [virtual-machine] Improve check for resizing job (#1701)
# Description
Backport of #1688 to `release-0.38`.
2025-12-09 17:10:14 +01:00
Andrei Kvapil
548b2c0ed3 [virtual-machine] Improve check for resizing job
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit 0bab895026)
2025-12-09 16:08:54 +00:00
Andrei Kvapil
202ff3433e [Backport release-0.38] [dashboard] Fix CustomFormsOverride schema to nest properties under spec.properties (#1700)
# Description
Backport of #1692 to `release-0.38`.
2025-12-09 17:05:22 +01:00
Andrei Kvapil
891195018f [dashboard] Fix CustomFormsOverride schema to nest properties under spec.properties
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit 578a810413)
2025-12-09 16:04:57 +00:00
Andrei Kvapil
d53861837f [Backport release-0.38] [linstor] Update piraeus-operator v2.10.2 (#1697)
# Description
Backport of #1689 to `release-0.38`.
2025-12-09 14:58:13 +01:00
Andrei Kvapil
f1a75ab864 [linstor] Update piraeus-operator v2.10.2
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
(cherry picked from commit 58dd1f5881)
2025-12-09 13:43:46 +00:00
Nikita
2110534e63 Release v0.38.3 (#1686)
This PR prepares the release `v0.38.3`.
2025-12-04 20:07:33 +03:00
cozystack-bot
2e22a6579e Prepare release v0.38.3
Signed-off-by: cozystack-bot <217169706+cozystack-bot@users.noreply.github.com>
2025-12-04 16:19:17 +00:00
Nikita
35907dd474 [core:installer] Address buildx warnings (#1682)
## What this PR does
Buildx warns about Dockerfile syntax; this PR fixes it.
```
 - FromAsCasing: 'as' and 'FROM' keywords' casing do not match (line 1)
 - FromAsCasing: 'as' and 'FROM' keywords' casing do not match (line 16)
```

### Release note
```release-note
Buildx warnings addressed.
```
2025-12-03 19:42:24 +03:00
Nikita
af56c105c2 [system:coredns] update coredns app labels to match Talos coredns labels (#1675)
## What this PR does
Updates coredns app labels to match Talos coredns labels

### Release note

```release-note
Coredns app labels updated to match Talos coredns labels.
```
2025-12-03 19:42:14 +03:00
Nikita
30e5b71e3f [system:monitoring-agents] rename coredns metrics service (#1676)
## What this PR does
Renames coredns metrics service

### Release note
```release-note
Renamed the coredns metrics service so that it does not interfere with the coredns service used for name resolution in tenant k8s clusters.
```
2025-12-03 19:41:59 +03:00
64 changed files with 770 additions and 489 deletions

11
go.mod
View File

@@ -6,11 +6,15 @@ go 1.23.0
require (
github.com/fluxcd/helm-controller/api v1.1.0
github.com/go-logr/logr v1.4.2
github.com/go-logr/zapr v1.3.0
github.com/google/gofuzz v1.2.0
github.com/onsi/ginkgo/v2 v2.19.0
github.com/onsi/gomega v1.33.1
github.com/prometheus/client_golang v1.19.1
github.com/spf13/cobra v1.8.1
github.com/stretchr/testify v1.9.0
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.31.2
k8s.io/apiextensions-apiserver v0.31.2
@@ -44,9 +48,7 @@ require (
github.com/fluxcd/pkg/apis/meta v1.6.1 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
@@ -74,7 +76,6 @@ require (
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
@@ -94,7 +95,6 @@ require (
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.33.0 // indirect
@@ -119,3 +119,6 @@ require (
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
// See: issues.k8s.io/135537
replace k8s.io/apimachinery => github.com/cozystack/apimachinery v0.0.0-20251201201312-18e522a87614

4
go.sum
View File

@@ -18,6 +18,8 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cozystack/apimachinery v0.0.0-20251201201312-18e522a87614 h1:jH9elECUvhiIs3IMv3oS5k1JgCLVsSK6oU4dmq5gyW8=
github.com/cozystack/apimachinery v0.0.0-20251201201312-18e522a87614/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -291,8 +293,6 @@ k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0=
k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM=
k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=

View File

@@ -72,7 +72,7 @@ EOF
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-${test_name} --timeout=4m
# Wait for Kubernetes resources to be ready (timeout after 2 minutes)
kubectl wait tcp -n tenant-test kubernetes-${test_name} --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
kubectl wait tcp -n tenant-test kubernetes-${test_name} --timeout=5m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
# Wait for all required deployments to be available (timeout after 4 minutes)
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-${test_name} kubernetes-${test_name}-cluster-autoscaler kubernetes-${test_name}-kccm kubernetes-${test_name}-kcsi-controller
@@ -87,7 +87,7 @@ EOF
# Set up port forwarding to the Kubernetes API server for a 200 second timeout
bash -c 'timeout 300s kubectl port-forward service/kubernetes-'"${test_name}"' -n tenant-test '"${port}"':6443 > /dev/null 2>&1 &'
bash -c 'timeout 500s kubectl port-forward service/kubernetes-'"${test_name}"' -n tenant-test '"${port}"':6443 > /dev/null 2>&1 &'
# Verify the Kubernetes version matches what we expect (retry for up to 20 seconds)
timeout 20 sh -ec 'until kubectl --kubeconfig tenantkubeconfig-'"${test_name}"' version 2>/dev/null | grep -Fq "Server Version: ${k8s_version}"; do sleep 5; done'
@@ -124,6 +124,100 @@ EOF
exit 1
fi
kubectl --kubeconfig tenantkubeconfig-${test_name} apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
name: tenant-test
EOF
# Backend 1
kubectl apply --kubeconfig tenantkubeconfig-${test_name} -f- <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: "${test_name}-backend"
namespace: tenant-test
spec:
replicas: 1
selector:
matchLabels:
app: backend
backend: "${test_name}-backend"
template:
metadata:
labels:
app: backend
backend: "${test_name}-backend"
spec:
containers:
- name: nginx
image: nginx:alpine
ports:
- containerPort: 80
readinessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 2
periodSeconds: 2
EOF
# LoadBalancer Service
kubectl apply --kubeconfig tenantkubeconfig-${test_name} -f- <<EOF
apiVersion: v1
kind: Service
metadata:
name: "${test_name}-backend"
namespace: tenant-test
spec:
type: LoadBalancer
selector:
app: backend
backend: "${test_name}-backend"
ports:
- port: 80
targetPort: 80
EOF
# Wait for pods readiness
kubectl wait deployment --kubeconfig tenantkubeconfig-${test_name} ${test_name}-backend -n tenant-test --for=condition=Available --timeout=90s
# Wait for LoadBalancer to be provisioned (IP or hostname)
timeout 90 sh -ec "
until kubectl get svc ${test_name}-backend --kubeconfig tenantkubeconfig-${test_name} -n tenant-test \
-o jsonpath='{.status.loadBalancer.ingress[0]}' | grep -q .; do
sleep 5
done
"
LB_ADDR=$(
kubectl get svc --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" \
-n tenant-test \
-o jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}'
)
if [ -z "$LB_ADDR" ]; then
echo "LoadBalancer address is empty" >&2
exit 1
fi
for i in $(seq 1 20); do
echo "Attempt $i"
curl --silent --fail "http://${LB_ADDR}" && break
sleep 3
done
if [ "$i" -eq 20 ]; then
echo "LoadBalancer not reachable" >&2
exit 1
fi
# Cleanup
kubectl delete deployment --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" -n tenant-test
kubectl delete service --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" -n tenant-test
# Wait for all machine deployment replicas to be ready (timeout after 10 minutes)
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2

View File

@@ -21,14 +21,33 @@
}
@test "Test kinds" {
val=$(kubectl get --raw /apis/apps.cozystack.io/v1alpha1/tenants | jq -r '.kind')
if [ "$val" != "TenantList" ]; then
echo "Expected kind to be TenantList, got $val"
exit 1
fi
val=$(kubectl get --raw /apis/apps.cozystack.io/v1alpha1/tenants | jq -r '.items[0].kind')
if [ "$val" != "Tenant" ]; then
echo "Expected kind to be Tenant, got $val"
exit 1
fi
val=$(kubectl get --raw /apis/apps.cozystack.io/v1alpha1/ingresses | jq -r '.kind')
if [ "$val" != "IngressList" ]; then
echo "Expected kind to be IngressList, got $val"
exit 1
fi
val=$(kubectl get --raw /apis/apps.cozystack.io/v1alpha1/ingresses | jq -r '.items[0].kind')
if [ "$val" != "Ingress" ]; then
echo "Expected kind to be Ingress, got $val"
exit 1
fi
}
@test "Create and delete namespace" {
kubectl create ns cozy-test-create-and-delete-namespace --dry-run=client -o yaml | kubectl apply -f -
if ! kubectl delete ns cozy-test-create-and-delete-namespace; then
echo "Failed to delete namespace"
kubectl describe ns cozy-test-create-and-delete-namespace
exit 1
fi
}

View File

@@ -105,8 +105,26 @@ func buildMultilineStringSchema(openAPISchema string) (map[string]any, error) {
"properties": map[string]any{},
}
// Check if there's a spec property
specProp, ok := props["spec"].(map[string]any)
if !ok {
return map[string]any{}, nil
}
specProps, ok := specProp["properties"].(map[string]any)
if !ok {
return map[string]any{}, nil
}
// Create spec.properties structure in schema
schemaProps := schema["properties"].(map[string]any)
specSchema := map[string]any{
"properties": map[string]any{},
}
schemaProps["spec"] = specSchema
// Process spec properties recursively
processSpecProperties(props, schema["properties"].(map[string]any))
processSpecProperties(specProps, specSchema["properties"].(map[string]any))
return schema, nil
}

View File

@@ -9,41 +9,46 @@ func TestBuildMultilineStringSchema(t *testing.T) {
// Test OpenAPI schema with various field types
openAPISchema := `{
"properties": {
"simpleString": {
"type": "string",
"description": "A simple string field"
},
"stringWithEnum": {
"type": "string",
"enum": ["option1", "option2"],
"description": "String with enum should be skipped"
},
"numberField": {
"type": "number",
"description": "Number field should be skipped"
},
"nestedObject": {
"spec": {
"type": "object",
"properties": {
"nestedString": {
"simpleString": {
"type": "string",
"description": "Nested string should get multilineString"
"description": "A simple string field"
},
"nestedStringWithEnum": {
"stringWithEnum": {
"type": "string",
"enum": ["a", "b"],
"description": "Nested string with enum should be skipped"
}
}
},
"arrayOfObjects": {
"type": "array",
"items": {
"type": "object",
"properties": {
"itemString": {
"type": "string",
"description": "String in array item"
"enum": ["option1", "option2"],
"description": "String with enum should be skipped"
},
"numberField": {
"type": "number",
"description": "Number field should be skipped"
},
"nestedObject": {
"type": "object",
"properties": {
"nestedString": {
"type": "string",
"description": "Nested string should get multilineString"
},
"nestedStringWithEnum": {
"type": "string",
"enum": ["a", "b"],
"description": "Nested string with enum should be skipped"
}
}
},
"arrayOfObjects": {
"type": "array",
"items": {
"type": "object",
"properties": {
"itemString": {
"type": "string",
"description": "String in array item"
}
}
}
}
}
@@ -70,33 +75,44 @@ func TestBuildMultilineStringSchema(t *testing.T) {
t.Fatal("schema.properties is not a map")
}
// Check simpleString
simpleString, ok := props["simpleString"].(map[string]any)
// Check spec property exists
spec, ok := props["spec"].(map[string]any)
if !ok {
t.Fatal("simpleString not found in properties")
t.Fatal("spec not found in properties")
}
specProps, ok := spec["properties"].(map[string]any)
if !ok {
t.Fatal("spec.properties is not a map")
}
// Check simpleString
simpleString, ok := specProps["simpleString"].(map[string]any)
if !ok {
t.Fatal("simpleString not found in spec.properties")
}
if simpleString["type"] != "multilineString" {
t.Errorf("simpleString should have type multilineString, got %v", simpleString["type"])
}
// Check stringWithEnum should not be present (or should not have multilineString)
if stringWithEnum, ok := props["stringWithEnum"].(map[string]any); ok {
if stringWithEnum, ok := specProps["stringWithEnum"].(map[string]any); ok {
if stringWithEnum["type"] == "multilineString" {
t.Error("stringWithEnum should not have multilineString type")
}
}
// Check numberField should not be present
if numberField, ok := props["numberField"].(map[string]any); ok {
if numberField, ok := specProps["numberField"].(map[string]any); ok {
if numberField["type"] != nil {
t.Error("numberField should not have any type override")
}
}
// Check nested object
nestedObject, ok := props["nestedObject"].(map[string]any)
nestedObject, ok := specProps["nestedObject"].(map[string]any)
if !ok {
t.Fatal("nestedObject not found in properties")
t.Fatal("nestedObject not found in spec.properties")
}
nestedProps, ok := nestedObject["properties"].(map[string]any)
if !ok {
@@ -113,9 +129,9 @@ func TestBuildMultilineStringSchema(t *testing.T) {
}
// Check array of objects
arrayOfObjects, ok := props["arrayOfObjects"].(map[string]any)
arrayOfObjects, ok := specProps["arrayOfObjects"].(map[string]any)
if !ok {
t.Fatal("arrayOfObjects not found in properties")
t.Fatal("arrayOfObjects not found in spec.properties")
}
items, ok := arrayOfObjects["items"].(map[string]any)
if !ok {

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.0.0@sha256:e0a07082bb6fc6aeaae2315f335386f1705a646c72f9e0af512aebbca5cb2b15
ghcr.io/cozystack/cozystack/nginx-cache:0.0.0@sha256:31ebc09cfa11d8b438d2bbb32fa61b133aaf4b48b1a1282c9e59b5c127af61c1

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.0.0@sha256:2d39989846c3579dd020b9f6c77e6e314cc81aa344eaac0f6d633e723c17196d
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.0.0@sha256:372ad087ae96bd0cd642e2b0855ec7ffb1369d6cf4f0b92204725557c11bc0ff

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.0.0@sha256:5335c044313b69ee13b30ca4941687e509005e55f4ae25723861edbf2fbd6dd2
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.0.0@sha256:feb5df18d485939114fa3792dd46d09be1faa3e826adf2f1d6c8f7d46455d017

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:d5c836ba33cf5dbed7e6f866784f668f80ffe69179e7c75847b680111984eefb
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:b42c6af641ee0eadb7e0a42e368021b4759f443cb7b71b7e745a64f0fc8b752e

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.33@sha256:a09724a7f95283f9130b3da2a89d81c4c6051c6edf0392a81b6fc90f404b76b6
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.33@sha256:d25e567bc8b17b596e050f5ff410e36112c7966e33f4b372c752e7350bacc894

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/mariadb-backup:0.0.0@sha256:1c0beb1b23a109b0e13727b4c73d2c74830e11cede92858ab20101b66f45a858
ghcr.io/cozystack/cozystack/mariadb-backup:0.0.0@sha256:aca403030ff5d831415d72367866fdf291fab73ee2cfddbe4c93c2915a316ab1

View File

@@ -69,3 +69,36 @@ Generate a stable UUID for cloud-init re-initialization upon upgrade.
{{- end }}
{{- $uuid }}
{{- end }}
{{/*
Node Affinity for Windows VMs
*/}}
{{- define "virtual-machine.nodeAffinity" -}}
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" -}}
{{- if $configMap -}}
{{- $dedicatedNodesForWindowsVMs := get $configMap.data "dedicatedNodesForWindowsVMs" -}}
{{- if eq $dedicatedNodesForWindowsVMs "true" -}}
{{- $isWindows := hasPrefix "windows" (toString .Values.instanceProfile) -}}
affinity:
nodeAffinity:
{{- if $isWindows }}
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: scheduling.cozystack.io/vm-windows
operator: In
values:
- "true"
{{- else }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: scheduling.cozystack.io/vm-windows
operator: NotIn
values:
- "true"
{{- end }}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -27,7 +27,11 @@
{{- if and $existingPVC $desiredStorage -}}
{{- $currentStorage := $existingPVC.spec.resources.requests.storage | toString -}}
{{- if not (eq $currentStorage $desiredStorage) -}}
{{- $needResizePVC = true -}}
{{- $oldSize := (include "cozy-lib.resources.toFloat" $currentStorage) | float64 -}}
{{- $newSize := (include "cozy-lib.resources.toFloat" $desiredStorage) | float64 -}}
{{- if gt $newSize $oldSize -}}
{{- $needResizePVC = true -}}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -124,6 +124,8 @@ spec:
terminationGracePeriodSeconds: 30
{{- include "virtual-machine.nodeAffinity" . | nindent 6 }}
volumes:
- name: systemdisk
dataVolume:

View File

@@ -1,5 +1,17 @@
{{- $existingPVC := lookup "v1" "PersistentVolumeClaim" .Release.Namespace .Release.Name }}
{{- if and $existingPVC (ne ($existingPVC.spec.resources.requests.storage | toString) .Values.storage) -}}
{{- $shouldResize := false -}}
{{- if and $existingPVC .Values.storage -}}
{{- $currentStorage := $existingPVC.spec.resources.requests.storage | toString -}}
{{- if ne $currentStorage .Values.storage -}}
{{- $oldSize := (include "cozy-lib.resources.toFloat" $currentStorage) | float64 -}}
{{- $newSize := (include "cozy-lib.resources.toFloat" .Values.storage) | float64 -}}
{{- if gt $newSize $oldSize -}}
{{- $shouldResize = true -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- if $shouldResize -}}
apiVersion: batch/v1
kind: Job
metadata:
@@ -23,6 +35,7 @@ spec:
command: ["sh", "-xec"]
args:
- |
echo "Resizing PVC to {{ .Values.storage }}..."
kubectl patch pvc {{ .Release.Name }} -p '{"spec":{"resources":{"requests":{"storage":"{{ .Values.storage }}"}}}}'
---
apiVersion: v1

View File

@@ -69,3 +69,36 @@ Generate a stable UUID for cloud-init re-initialization upon upgrade.
{{- end }}
{{- $uuid }}
{{- end }}
{{/*
Node Affinity for Windows VMs
*/}}
{{- define "virtual-machine.nodeAffinity" -}}
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" -}}
{{- if $configMap -}}
{{- $dedicatedNodesForWindowsVMs := get $configMap.data "dedicatedNodesForWindowsVMs" -}}
{{- if eq $dedicatedNodesForWindowsVMs "true" -}}
{{- $isWindows := hasPrefix "windows" (toString .Values.instanceProfile) -}}
affinity:
nodeAffinity:
{{- if $isWindows }}
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: scheduling.cozystack.io/vm-windows
operator: In
values:
- "true"
{{- else }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: scheduling.cozystack.io/vm-windows
operator: NotIn
values:
- "true"
{{- end }}
{{- end -}}
{{- end -}}
{{- end -}}

View File

@@ -95,6 +95,9 @@ spec:
noCloud: {}
{{- end }}
terminationGracePeriodSeconds: 30
{{- include "virtual-machine.nodeAffinity" . | nindent 6 }}
volumes:
{{- range .Values.disks }}
- name: disk-{{ .name }}

View File

@@ -1,4 +1,4 @@
FROM golang:1.24-alpine as k8s-await-election-builder
FROM golang:1.24-alpine AS k8s-await-election-builder
ARG K8S_AWAIT_ELECTION_GITREPO=https://github.com/LINBIT/k8s-await-election
ARG K8S_AWAIT_ELECTION_VERSION=0.4.1
@@ -13,7 +13,7 @@ RUN git clone ${K8S_AWAIT_ELECTION_GITREPO} /usr/local/go/k8s-await-election/ \
&& make \
&& mv ./out/k8s-await-election-${TARGETARCH} /k8s-await-election
FROM golang:1.24-alpine as builder
FROM golang:1.24-alpine AS builder
ARG TARGETOS
ARG TARGETARCH

View File

@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/cozystack/cozystack/installer:v0.38.2@sha256:9ff92b655de6f9bea3cba4cd42dcffabd9aace6966dcfb1cc02dda2420ea4a15
image: ghcr.io/cozystack/cozystack/installer:v0.38.8@sha256:2a82678c2b020047ca3f4baaaa79e1284810fbe5d5ffdc024b242bc9f33df168

View File

@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.38.2@sha256:84be9e42bc2c04b0765c8b89e0a9728c49ebf4676a92522b007af96ae9aec68d
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.38.8@sha256:bc12856b5c2dff2855b26b1a2909d05d07a4c4e2af5f0c36468de09585888573

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.38.2@sha256:9cd7f46fcae119a3f8e35b428b018d0cb6da7b0cdd2ce764cc9fbf6dcd903f27
ghcr.io/cozystack/cozystack/matchbox:v0.38.8@sha256:dbce65fee982b7d2b6b7baa9ac862e178f9e153f8918fc0c5c57be03ccfd6730

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/grafana:0.0.0@sha256:c63978e1ed0304e8518b31ddee56c4e8115541b997d8efbe1c0a74da57140399
ghcr.io/cozystack/cozystack/grafana:0.0.0@sha256:8ce0cd90c8f614cdabf5a41f8aa50b7dfbd02b31b9a0bd7897927e7f89968e07

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/objectstorage-sidecar:v0.38.2@sha256:ff3281fe53a97d2cd5cd94bd4c4d8ff08189508729869bb39b3f60c80da5f919
ghcr.io/cozystack/cozystack/objectstorage-sidecar:v0.38.8@sha256:2d1833c78c35b697a3634d4b3be9a3218edae95a77583e9e121c10a92e7433ec

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:3825c9b4b6238f88f1b0de73bd18866a7e5f83f178d28fe2830f3bf24efb187d
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:ecb140d026ed72660306953a7eec140d7ac81e79544d5bbf1aba5f62aa5f8b69

View File

@@ -18,3 +18,6 @@ cilium:
digest: "sha256:81262986a41487bfa3d0465091d3a386def5bd1ab476350bd4af2fdee5846fe6"
envoy:
enabled: false
rollOutCiliumPods: true
operator:
rollOutPods: true

View File

@@ -3,3 +3,6 @@ coredns:
repository: registry.k8s.io/coredns/coredns
tag: v1.12.4
replicaCount: 2
k8sAppLabelOverride: kube-dns
service:
name: kube-dns

View File

@@ -1,5 +1,5 @@
cozystackAPI:
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.38.2@sha256:d17f1c59658731e5a2063c3db348adbc03b5cd31720052016b68449164cf2f14
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.38.8@sha256:51574c6bb61ae31e63193f84daf18c14ceb71580786e262191c4aa0ac44b1519
localK8sAPIEndpoint:
enabled: true
replicas: 2

View File

@@ -1,6 +1,6 @@
cozystackController:
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.38.2@sha256:468b2eccbc0aa00bd3d72d56624a46e6ba178fa279cdd19248af74d32ea7d319
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.38.8@sha256:6be8fa0a56c0dca71086deddef1b16d23c613b912c7ca379096ca3ad0e50dffb
debug: false
disableTelemetry: false
cozystackVersion: "v0.38.2"
cozystackVersion: "v0.38.8"
cozystackAPIKind: "DaemonSet"

View File

@@ -1,6 +1,6 @@
{{- $brandingConfig:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
{{- $tenantText := "v0.38.2" }}
{{- $tenantText := "v0.38.8" }}
{{- $footerText := "Cozystack" }}
{{- $titleText := "Cozystack Dashboard" }}
{{- $logoText := "" }}

View File

@@ -1,6 +1,6 @@
openapiUI:
image: ghcr.io/cozystack/cozystack/openapi-ui:v0.38.2@sha256:5aafb6c864c5523418d021a9fe5b514990d36972b6f1de9c34a1cd41f9d8bf7e
image: ghcr.io/cozystack/cozystack/openapi-ui:v0.38.8@sha256:54f53571422c50f6aab613031d519a305564e4ec0b456baa23e98b7707ac001b
openapiUIK8sBff:
image: ghcr.io/cozystack/cozystack/openapi-ui-k8s-bff:v0.38.2@sha256:7ffd8ae7b9da73fec7ae61a71c9c821a718d89a1b1df0197e09fda57678e1220
image: ghcr.io/cozystack/cozystack/openapi-ui-k8s-bff:v0.38.8@sha256:1f7827a1978bd9c81ac924dd0e78f6a3ce834a9a64af55047e220812bc15a944
tokenProxy:
image: ghcr.io/cozystack/cozystack/token-proxy:v0.38.2@sha256:fad27112617bb17816702571e1f39d0ac3fe5283468d25eb12f79906cdab566b
image: ghcr.io/cozystack/cozystack/token-proxy:v0.38.8@sha256:4fc8a11f8a1a81aa0774ae2b1ed2e05d36d0b3ef1e37979cc4994e65114d93ae

View File

@@ -3,7 +3,7 @@ kamaji:
deploy: false
image:
pullPolicy: IfNotPresent
tag: v0.38.2@sha256:13741b8f6dfede3ea0fd16d8bbebae810bc19254a81d7e5a139535efa17eabff
tag: v0.38.8@sha256:4588de4380fb70c29c4a762fb19a9bbe210e68bc5ff67035c752c44daf319bfc
repository: ghcr.io/cozystack/cozystack/kamaji
resources:
limits:
@@ -13,4 +13,4 @@ kamaji:
cpu: 100m
memory: 100Mi
extraArgs:
- --migrate-image=ghcr.io/cozystack/cozystack/kamaji:v0.38.2@sha256:13741b8f6dfede3ea0fd16d8bbebae810bc19254a81d7e5a139535efa17eabff
- --migrate-image=ghcr.io/cozystack/cozystack/kamaji:v0.38.8@sha256:4588de4380fb70c29c4a762fb19a9bbe210e68bc5ff67035c752c44daf319bfc

View File

@@ -1,4 +1,4 @@
portSecurity: true
routes: ""
image: ghcr.io/cozystack/cozystack/kubeovn-plunger:v0.38.2@sha256:76c8af24cbec0261718c13c0150aa81c238a956626d4fd7baa8970b47fb3a6f0
image: ghcr.io/cozystack/cozystack/kubeovn-plunger:v0.38.8@sha256:14537e277f6de81ec4fda42b8ffc25b224834b9e3af81ef42b80c50cee6f68ef
ovnCentralName: ovn-central

View File

@@ -1,3 +1,3 @@
portSecurity: true
routes: ""
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.38.2@sha256:8e67b2971f8c079a8b0636be1d091a9545d6cb653d745ff222a5966f56f903bd
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.38.8@sha256:e6334c29d3aaf0dea766c88e3e05b53ad623d1bb497b3c836e6f76adade45b29

View File

@@ -65,4 +65,4 @@ global:
images:
kubeovn:
repository: kubeovn
tag: v1.14.11@sha256:8e6cf216687b4a80c35fa7c60bb4d511dd6aaaaf19d1ec53321dfef98d343f51
tag: v1.14.11@sha256:85612c4fb14feb930f04771b0ca377a51344fa5e3e2eb09d95c059aa19c4c8ae

View File

@@ -1,3 +1,3 @@
storageClass: replicated
csiDriver:
image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:d5c836ba33cf5dbed7e6f866784f668f80ffe69179e7c75847b680111984eefb
image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.0.0@sha256:b42c6af641ee0eadb7e0a42e368021b4759f443cb7b71b7e745a64f0fc8b752e

View File

@@ -27,7 +27,7 @@ spec:
expr: |
max_over_time(
kubevirt_vmi_info{
phase!="Running",
phase!="running",
exported_namespace=~".+",
name=~".+"
}[10m]

View File

@@ -1,5 +1,5 @@
lineageControllerWebhook:
image: ghcr.io/cozystack/cozystack/lineage-controller-webhook:v0.38.2@sha256:a5c750a0f46e8e25329b3ee2110d5dfb077c73e473195f1ed768d28d6f43902c
image: ghcr.io/cozystack/cozystack/lineage-controller-webhook:v0.38.8@sha256:d2525b2cef34a6eea59d77e2bbca52eb0cd377a526b41643e104873613e7b6a0
debug: false
localK8sAPIEndpoint:
enabled: true

View File

@@ -4,8 +4,8 @@ metallb:
controller:
image:
repository: ghcr.io/cozystack/cozystack/metallb-controller
tag: v0.15.2@sha256:0e9080234fc8eedab78ad2831fb38df375c383e901a752d72b353c8d13b9605f
tag: v0.15.2@sha256:623ce74b5802bff6e29f29478ccab29ce4162a64148be006c69e16cc3207e289
speaker:
image:
repository: ghcr.io/cozystack/cozystack/metallb-speaker
tag: v0.15.2@sha256:e14d4c328c3ab91a6eadfeea90da96388503492d165e7e8582f291b1872e53b2
tag: v0.15.2@sha256:f264058afd9228452a260ab9c9dd1859404745627a2a38c2ba4671e27f3b3bb2

View File

@@ -2,7 +2,7 @@
apiVersion: v1
kind: Service
metadata:
name: coredns
name: coredns-metrics
namespace: kube-system
labels:
app: coredns
@@ -19,7 +19,7 @@ spec:
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMServiceScrape
metadata:
name: coredns
name: coredns-metrics
namespace: cozy-monitoring
spec:
selector:

View File

@@ -162,7 +162,6 @@ spec:
memory: "100Mi"
limits:
cpu: "100m"
memory: "300Mi"
securityContext:
privileged: true
terminationMessagePolicy: FallbackToLogsOnError

View File

@@ -1,3 +1,3 @@
objectstorage:
controller:
image: "ghcr.io/cozystack/cozystack/objectstorage-controller:v0.38.2@sha256:7d37495cce46d30d4613ecfacaa7b7f140e7ea8f3dbcc3e8c976e271de6cc71b"
image: "ghcr.io/cozystack/cozystack/objectstorage-controller:v0.38.8@sha256:cbf22bcbeed7049340aa41f41cc130596bdb962873116e0c4eb5bab123ae13b0"

View File

@@ -3,8 +3,8 @@ name: piraeus
description: |
The Piraeus Operator manages software defined storage clusters using LINSTOR in Kubernetes.
type: application
version: 2.10.1
appVersion: "v2.10.1"
version: 2.10.2
appVersion: "v2.10.2"
maintainers:
- name: Piraeus Datastore
url: https://piraeus.io

View File

@@ -23,10 +23,10 @@ data:
tag: v1.32.3
image: piraeus-server
linstor-csi:
tag: v1.10.2
tag: v1.10.3
image: piraeus-csi
nfs-server:
tag: v1.10.2
tag: v1.10.3
image: piraeus-csi-nfs-server
drbd-reactor:
tag: v1.10.0
@@ -44,7 +44,7 @@ data:
tag: v1.3.0
image: linstor-affinity-controller
drbd-module-loader:
tag: v9.2.15
tag: v9.2.16
# The special "match" attribute is used to select an image based on the node's reported OS.
# The operator will first check the k8s node's ".status.nodeInfo.osImage" field, and compare it against the list
# here. If one matches, that specific image name will be used instead of the fallback image.
@@ -99,7 +99,7 @@ data:
tag: v2.17.0
image: livenessprobe
csi-provisioner:
tag: v6.0.0
tag: v6.1.0
image: csi-provisioner
csi-snapshotter:
tag: v8.4.0

View File

@@ -993,6 +993,24 @@ spec:
- Retain
- Delete
type: string
evacuationStrategy:
description: EvacuationStrategy configures the evacuation of volumes
from a Satellite when DeletionPolicy "Evacuate" is used.
nullable: true
properties:
attachedVolumeReattachTimeout:
default: 5m
description: |-
AttachedVolumeReattachTimeout configures how long evacuation waits for attached volumes to reattach on
different nodes. Setting this to 0 disable this evacuation step.
type: string
unattachedVolumeAttachTimeout:
default: 5m
description: |-
UnattachedVolumeAttachTimeout configures how long evacuation waits for unattached volumes to attach on
different nodes. Setting this to 0 disable this evacuation step.
type: string
type: object
internalTLS:
description: |-
InternalTLS configures secure communication for the LINSTOR Satellite.
@@ -1683,6 +1701,23 @@ spec:
- Retain
- Delete
type: string
evacuationStrategy:
description: EvacuationStrategy configures the evacuation of volumes
from a Satellite when DeletionPolicy "Evacuate" is used.
properties:
attachedVolumeReattachTimeout:
default: 5m
description: |-
AttachedVolumeReattachTimeout configures how long evacuation waits for attached volumes to reattach on
different nodes. Setting this to 0 disable this evacuation step.
type: string
unattachedVolumeAttachTimeout:
default: 5m
description: |-
UnattachedVolumeAttachTimeout configures how long evacuation waits for unattached volumes to attach on
different nodes. Setting this to 0 disable this evacuation step.
type: string
type: object
internalTLS:
description: |-
InternalTLS configures secure communication for the LINSTOR Satellite.

View File

@@ -12,7 +12,6 @@ update:
sed -i.bak "/ARG VERSION/ s|=.*|=$${version}|g" images/seaweedfs/Dockerfile && \
rm -f images/seaweedfs/Dockerfile.bak
patch --no-backup-if-mismatch -p4 < patches/resize-api-server-annotation.diff
patch --no-backup-if-mismatch -p4 < patches/long-term-ca.diff
#patch --no-backup-if-mismatch -p4 < patches/retention-policy-delete.yaml
image:

View File

@@ -1,6 +1,6 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "3.99"
appVersion: "4.02"
# Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
version: 4.0.399
version: 4.0.402

View File

@@ -15,9 +15,9 @@ metadata:
{{- toYaml .Values.allInOne.annotations | nindent 4 }}
{{- end }}
spec:
replicas: 1
replicas: {{ .Values.allInOne.replicas | default 1 }}
strategy:
type: Recreate
type: {{ .Values.allInOne.updateStrategy.type | default "Recreate" }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
@@ -130,12 +130,23 @@ spec:
value: {{ include "seaweedfs.cluster.masterAddress" . | quote }}
- name: {{ $clusterFilerKey }}
value: {{ include "seaweedfs.cluster.filerAddress" . | quote }}
{{- if .Values.allInOne.secretExtraEnvironmentVars }}
{{- range $key, $value := .Values.allInOne.secretExtraEnvironmentVars }}
- name: {{ $key }}
valueFrom:
{{ toYaml $value | nindent 16 }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
- |
/usr/bin/weed \
{{- if .Values.allInOne.loggingOverrideLevel }}
-v={{ .Values.allInOne.loggingOverrideLevel }} \
{{- else }}
-v={{ .Values.global.loggingLevel }} \
{{- end }}
server \
-dir=/data \
-master \
@@ -191,6 +202,9 @@ spec:
{{- else if .Values.master.metricsPort }}
-metricsPort={{ .Values.master.metricsPort }} \
{{- end }}
{{- if .Values.allInOne.metricsIp }}
-metricsIp={{ .Values.allInOne.metricsIp }} \
{{- end }}
-filer \
-filer.port={{ .Values.filer.port }} \
{{- if .Values.filer.disableDirListing }}
@@ -219,61 +233,75 @@ spec:
{{- end }}
{{- if .Values.allInOne.s3.enabled }}
-s3 \
-s3.port={{ .Values.s3.port }} \
{{- if .Values.s3.domainName }}
-s3.domainName={{ .Values.s3.domainName }} \
-s3.port={{ .Values.allInOne.s3.port | default .Values.s3.port }} \
{{- $domainName := .Values.allInOne.s3.domainName | default .Values.s3.domainName }}
{{- if $domainName }}
-s3.domainName={{ $domainName }} \
{{- end }}
{{- if .Values.global.enableSecurity }}
{{- if .Values.s3.httpsPort }}
-s3.port.https={{ .Values.s3.httpsPort }} \
{{- $httpsPort := .Values.allInOne.s3.httpsPort | default .Values.s3.httpsPort }}
{{- if $httpsPort }}
-s3.port.https={{ $httpsPort }} \
{{- end }}
-s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }}
-s3.allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.s3.enableAuth }}
{{- if or .Values.allInOne.s3.enableAuth .Values.s3.enableAuth .Values.filer.s3.enableAuth }}
-s3.config=/etc/sw/s3/seaweedfs_s3_config \
{{- end }}
{{- if .Values.s3.auditLogConfig }}
{{- $auditLogConfig := .Values.allInOne.s3.auditLogConfig | default .Values.s3.auditLogConfig }}
{{- if $auditLogConfig }}
-s3.auditLogConfig=/etc/sw/s3/s3_auditLogConfig.json \
{{- end }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
-sftp \
-sftp.port={{ .Values.sftp.port }} \
{{- if .Values.sftp.sshPrivateKey }}
-sftp.sshPrivateKey={{ .Values.sftp.sshPrivateKey }} \
-sftp.port={{ .Values.allInOne.sftp.port | default .Values.sftp.port }} \
{{- $sshPrivateKey := .Values.allInOne.sftp.sshPrivateKey | default .Values.sftp.sshPrivateKey }}
{{- if $sshPrivateKey }}
-sftp.sshPrivateKey={{ $sshPrivateKey }} \
{{- end }}
{{- if .Values.sftp.hostKeysFolder }}
-sftp.hostKeysFolder={{ .Values.sftp.hostKeysFolder }} \
{{- $hostKeysFolder := .Values.allInOne.sftp.hostKeysFolder | default .Values.sftp.hostKeysFolder }}
{{- if $hostKeysFolder }}
-sftp.hostKeysFolder={{ $hostKeysFolder }} \
{{- end }}
{{- if .Values.sftp.authMethods }}
-sftp.authMethods={{ .Values.sftp.authMethods }} \
{{- $authMethods := .Values.allInOne.sftp.authMethods | default .Values.sftp.authMethods }}
{{- if $authMethods }}
-sftp.authMethods={{ $authMethods }} \
{{- end }}
{{- if .Values.sftp.maxAuthTries }}
-sftp.maxAuthTries={{ .Values.sftp.maxAuthTries }} \
{{- $maxAuthTries := .Values.allInOne.sftp.maxAuthTries | default .Values.sftp.maxAuthTries }}
{{- if $maxAuthTries }}
-sftp.maxAuthTries={{ $maxAuthTries }} \
{{- end }}
{{- if .Values.sftp.bannerMessage }}
-sftp.bannerMessage="{{ .Values.sftp.bannerMessage }}" \
{{- $bannerMessage := .Values.allInOne.sftp.bannerMessage | default .Values.sftp.bannerMessage }}
{{- if $bannerMessage }}
-sftp.bannerMessage="{{ $bannerMessage }}" \
{{- end }}
{{- if .Values.sftp.loginGraceTime }}
-sftp.loginGraceTime={{ .Values.sftp.loginGraceTime }} \
{{- $loginGraceTime := .Values.allInOne.sftp.loginGraceTime | default .Values.sftp.loginGraceTime }}
{{- if $loginGraceTime }}
-sftp.loginGraceTime={{ $loginGraceTime }} \
{{- end }}
{{- if .Values.sftp.clientAliveInterval }}
-sftp.clientAliveInterval={{ .Values.sftp.clientAliveInterval }} \
{{- $clientAliveInterval := .Values.allInOne.sftp.clientAliveInterval | default .Values.sftp.clientAliveInterval }}
{{- if $clientAliveInterval }}
-sftp.clientAliveInterval={{ $clientAliveInterval }} \
{{- end }}
{{- if .Values.sftp.clientAliveCountMax }}
-sftp.clientAliveCountMax={{ .Values.sftp.clientAliveCountMax }} \
{{- $clientAliveCountMax := .Values.allInOne.sftp.clientAliveCountMax | default .Values.sftp.clientAliveCountMax }}
{{- if $clientAliveCountMax }}
-sftp.clientAliveCountMax={{ $clientAliveCountMax }} \
{{- end }}
{{- if or .Values.allInOne.sftp.enableAuth .Values.sftp.enableAuth }}
-sftp.userStoreFile=/etc/sw/sftp/seaweedfs_sftp_config \
{{- end }}
{{- end }}
{{- $extraArgsCount := len .Values.allInOne.extraArgs }}
{{- range $i, $arg := .Values.allInOne.extraArgs }}
{{ $arg | quote }}{{ if ne (add1 $i) $extraArgsCount }} \{{ end }}
{{- end }}
volumeMounts:
- name: data
mountPath: /data
{{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
{{- if and .Values.allInOne.s3.enabled (or .Values.allInOne.s3.enableAuth .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
- name: config-s3-users
mountPath: /etc/sw/s3
readOnly: true
@@ -282,10 +310,12 @@ spec:
- name: config-ssh
mountPath: /etc/sw/ssh
readOnly: true
{{- if or .Values.allInOne.sftp.enableAuth .Values.sftp.enableAuth }}
- mountPath: /etc/sw/sftp
name: config-users
readOnly: true
{{- end }}
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
mountPath: /etc/seaweedfs/notification.toml
@@ -332,15 +362,16 @@ spec:
- containerPort: {{ .Values.filer.grpcPort }}
name: swfs-fil-grpc
{{- if .Values.allInOne.s3.enabled }}
- containerPort: {{ .Values.s3.port }}
- containerPort: {{ .Values.allInOne.s3.port | default .Values.s3.port }}
name: swfs-s3
{{- if .Values.s3.httpsPort }}
- containerPort: {{ .Values.s3.httpsPort }}
{{- $httpsPort := .Values.allInOne.s3.httpsPort | default .Values.s3.httpsPort }}
{{- if $httpsPort }}
- containerPort: {{ $httpsPort }}
name: swfs-s3-tls
{{- end }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- containerPort: {{ .Values.sftp.port }}
- containerPort: {{ .Values.allInOne.sftp.port | default .Values.sftp.port }}
name: swfs-sftp
{{- end }}
{{- if .Values.allInOne.metricsPort }}
@@ -352,7 +383,7 @@ spec:
httpGet:
path: {{ .Values.allInOne.readinessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.allInOne.readinessProbe.scheme }}
scheme: {{ .Values.allInOne.readinessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.allInOne.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.allInOne.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.allInOne.readinessProbe.successThreshold }}
@@ -364,7 +395,7 @@ spec:
httpGet:
path: {{ .Values.allInOne.livenessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.allInOne.livenessProbe.scheme }}
scheme: {{ .Values.allInOne.livenessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.allInOne.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.allInOne.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.allInOne.livenessProbe.successThreshold }}
@@ -389,26 +420,31 @@ spec:
path: {{ .Values.allInOne.data.hostPathPrefix }}/seaweedfs-all-in-one-data/
type: DirectoryOrCreate
{{- else if eq .Values.allInOne.data.type "persistentVolumeClaim" }}
persistentVolumeClaim:
claimName: {{ template "seaweedfs.name" . }}-all-in-one-data
{{- else if eq .Values.allInOne.data.type "existingClaim" }}
persistentVolumeClaim:
claimName: {{ .Values.allInOne.data.claimName }}
{{- else if eq .Values.allInOne.data.type "emptyDir" }}
emptyDir: {}
{{- end }}
{{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
{{- if and .Values.allInOne.s3.enabled (or .Values.allInOne.s3.enableAuth .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
- name: config-s3-users
secret:
defaultMode: 420
secretName: {{ default (printf "%s-s3-secret" (include "seaweedfs.name" .)) (or .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret) }}
secretName: {{ default (printf "%s-s3-secret" (include "seaweedfs.name" .)) (or .Values.allInOne.s3.existingConfigSecret .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret) }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- name: config-ssh
secret:
defaultMode: 420
secretName: {{ default (printf "%s-sftp-ssh-secret" (include "seaweedfs.name" .)) .Values.sftp.existingSshConfigSecret }}
secretName: {{ default (printf "%s-sftp-ssh-secret" (include "seaweedfs.name" .)) (or .Values.allInOne.sftp.existingSshConfigSecret .Values.sftp.existingSshConfigSecret) }}
{{- if or .Values.allInOne.sftp.enableAuth .Values.sftp.enableAuth }}
- name: config-users
secret:
defaultMode: 420
secretName: {{ default (printf "%s-sftp-secret" (include "seaweedfs.name" .)) .Values.sftp.existingConfigSecret }}
secretName: {{ default (printf "%s-sftp-secret" (include "seaweedfs.name" .)) (or .Values.allInOne.sftp.existingConfigSecret .Values.sftp.existingConfigSecret) }}
{{- end }}
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config

View File

@@ -1,21 +1,28 @@
{{- if and .Values.allInOne.enabled (eq .Values.allInOne.data.type "persistentVolumeClaim") }}
{{- if .Values.allInOne.enabled }}
{{- if eq .Values.allInOne.data.type "persistentVolumeClaim" }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.allInOne.data.claimName }}
name: {{ template "seaweedfs.name" . }}-all-in-one-data
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- if .Values.allInOne.annotations }}
{{- with .Values.allInOne.data.annotations }}
annotations:
{{- toYaml .Values.allInOne.annotations | nindent 4 }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.allInOne.data.size }}
{{- toYaml (.Values.allInOne.data.accessModes | default (list "ReadWriteOnce")) | nindent 4 }}
{{- if .Values.allInOne.data.storageClass }}
storageClassName: {{ .Values.allInOne.data.storageClass }}
{{- end }}
{{- end }}
resources:
requests:
storage: {{ .Values.allInOne.data.size | default "10Gi" }}
{{- end }}
{{- end }}

View File

@@ -15,6 +15,7 @@ metadata:
{{- toYaml .Values.allInOne.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.allInOne.service.type | default "ClusterIP" }}
internalTrafficPolicy: {{ .Values.allInOne.service.internalTrafficPolicy | default "Cluster" }}
ports:
# Master ports
@@ -50,13 +51,14 @@ spec:
# S3 ports (if enabled)
{{- if .Values.allInOne.s3.enabled }}
- name: "swfs-s3"
port: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
targetPort: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
port: {{ .Values.allInOne.s3.port | default .Values.s3.port }}
targetPort: {{ .Values.allInOne.s3.port | default .Values.s3.port }}
protocol: TCP
{{- if and .Values.allInOne.s3.enabled .Values.s3.httpsPort }}
{{- $httpsPort := .Values.allInOne.s3.httpsPort | default .Values.s3.httpsPort }}
{{- if $httpsPort }}
- name: "swfs-s3-tls"
port: {{ .Values.s3.httpsPort }}
targetPort: {{ .Values.s3.httpsPort }}
port: {{ $httpsPort }}
targetPort: {{ $httpsPort }}
protocol: TCP
{{- end }}
{{- end }}
@@ -64,8 +66,8 @@ spec:
# SFTP ports (if enabled)
{{- if .Values.allInOne.sftp.enabled }}
- name: "swfs-sftp"
port: {{ .Values.sftp.port }}
targetPort: {{ .Values.sftp.port }}
port: {{ .Values.allInOne.sftp.port | default .Values.sftp.port }}
targetPort: {{ .Values.allInOne.sftp.port | default .Values.sftp.port }}
protocol: TCP
{{- end }}
@@ -80,4 +82,4 @@ spec:
selector:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- end }}
{{- end }}

View File

@@ -13,8 +13,12 @@ spec:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
commonName: "{{ template "seaweedfs.name" . }}-root-ca"
isCA: true
duration: 87600h
renewBefore: 720h
{{- if .Values.certificates.ca.duration }}
duration: {{ .Values.certificates.ca.duration }}
{{- end }}
{{- if .Values.certificates.ca.renewBefore }}
renewBefore: {{ .Values.certificates.ca.renewBefore }}
{{- end }}
issuerRef:
name: {{ template "seaweedfs.name" . }}-issuer
kind: Issuer

View File

@@ -1,5 +1,8 @@
{{- if .Values.filer.enabled }}
{{- if .Values.filer.ingress.enabled }}
{{- /* Filer ingress works for both normal mode (filer.enabled) and all-in-one mode (allInOne.enabled) */}}
{{- $filerEnabled := or .Values.filer.enabled .Values.allInOne.enabled }}
{{- if and $filerEnabled .Values.filer.ingress.enabled }}
{{- /* Determine service name based on deployment mode */}}
{{- $serviceName := ternary (printf "%s-all-in-one" (include "seaweedfs.name" .)) (printf "%s-filer" (include "seaweedfs.name" .)) .Values.allInOne.enabled }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
@@ -33,16 +36,14 @@ spec:
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-filer
name: {{ $serviceName }}
port:
number: {{ .Values.filer.port }}
#name:
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-filer
serviceName: {{ $serviceName }}
servicePort: {{ .Values.filer.port }}
{{- end }}
{{- if .Values.filer.ingress.host }}
host: {{ .Values.filer.ingress.host }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -213,9 +213,6 @@ spec:
-s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if eq (typeOf .Values.filer.s3.allowEmptyFolder) "bool" }}
-s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
-s3.config=/etc/sw/seaweedfs_s3_config \
{{- end }}
@@ -289,7 +286,7 @@ spec:
httpGet:
path: {{ .Values.filer.readinessProbe.httpGet.path }}
port: {{ .Values.filer.port }}
scheme: {{ .Values.filer.readinessProbe.scheme }}
scheme: {{ .Values.filer.readinessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.filer.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.filer.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.filer.readinessProbe.successThreshold }}
@@ -301,7 +298,7 @@ spec:
httpGet:
path: {{ .Values.filer.livenessProbe.httpGet.path }}
port: {{ .Values.filer.port }}
scheme: {{ .Values.filer.livenessProbe.scheme }}
scheme: {{ .Values.filer.livenessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.filer.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.filer.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.filer.livenessProbe.successThreshold }}
@@ -392,10 +389,12 @@ spec:
nodeSelector:
{{ tpl .Values.filer.nodeSelector . | indent 8 | trim }}
{{- end }}
{{- if and (.Values.filer.enablePVC) (eq .Values.filer.data.type "persistentVolumeClaim") }}
{{- if and (.Values.filer.enablePVC) (not .Values.filer.data) }}
# DEPRECATION: Deprecate in favor of filer.data section below
volumeClaimTemplates:
- metadata:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-filer
spec:
accessModes:
@@ -411,7 +410,9 @@ spec:
{{- if $pvc_exists }}
volumeClaimTemplates:
{{- if eq .Values.filer.data.type "persistentVolumeClaim" }}
- metadata:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-filer
{{- with .Values.filer.data.annotations }}
annotations:
@@ -425,7 +426,9 @@ spec:
storage: {{ .Values.filer.data.size }}
{{- end }}
{{- if eq .Values.filer.logs.type "persistentVolumeClaim" }}
- metadata:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: seaweedfs-filer-log-volume
{{- with .Values.filer.logs.annotations }}
annotations:

View File

@@ -235,7 +235,7 @@ spec:
httpGet:
path: {{ .Values.master.readinessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.master.readinessProbe.scheme }}
scheme: {{ .Values.master.readinessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
@@ -247,7 +247,7 @@ spec:
httpGet:
path: {{ .Values.master.livenessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.master.livenessProbe.scheme }}
scheme: {{ .Values.master.livenessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
@@ -327,7 +327,9 @@ spec:
{{- if $pvc_exists }}
volumeClaimTemplates:
{{- if eq .Values.master.data.type "persistentVolumeClaim"}}
- metadata:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data-{{ .Release.Namespace }}
{{- with .Values.master.data.annotations }}
annotations:
@@ -341,7 +343,9 @@ spec:
storage: {{ .Values.master.data.size }}
{{- end }}
{{- if eq .Values.master.logs.type "persistentVolumeClaim"}}
- metadata:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: seaweedfs-master-log-volume
{{- with .Values.master.logs.annotations }}
annotations:

View File

@@ -143,9 +143,6 @@ spec:
{{- if .Values.s3.domainName }}
-domainName={{ .Values.s3.domainName }} \
{{- end }}
{{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }}
-allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.s3.enableAuth }}
-config=/etc/sw/seaweedfs_s3_config \
{{- end }}
@@ -204,7 +201,7 @@ spec:
httpGet:
path: {{ .Values.s3.readinessProbe.httpGet.path }}
port: {{ .Values.s3.port }}
scheme: {{ .Values.s3.readinessProbe.scheme }}
scheme: {{ .Values.s3.readinessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.s3.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.s3.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.s3.readinessProbe.successThreshold }}
@@ -216,7 +213,7 @@ spec:
httpGet:
path: {{ .Values.s3.livenessProbe.httpGet.path }}
port: {{ .Values.s3.port }}
scheme: {{ .Values.s3.livenessProbe.scheme }}
scheme: {{ .Values.s3.livenessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.s3.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.s3.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.s3.livenessProbe.successThreshold }}

View File

@@ -1,4 +1,9 @@
{{- if .Values.s3.ingress.enabled }}
{{- /* S3 ingress works for standalone S3 gateway (s3.enabled), S3 on Filer (filer.s3.enabled), and all-in-one mode (allInOne.s3.enabled) */}}
{{- $s3Enabled := or .Values.s3.enabled (and .Values.filer.s3.enabled (not .Values.allInOne.enabled)) (and .Values.allInOne.enabled .Values.allInOne.s3.enabled) }}
{{- if and $s3Enabled .Values.s3.ingress.enabled }}
{{- /* Determine service name based on deployment mode */}}
{{- $serviceName := ternary (printf "%s-all-in-one" (include "seaweedfs.name" .)) (printf "%s-s3" (include "seaweedfs.name" .)) .Values.allInOne.enabled }}
{{- $s3Port := .Values.allInOne.s3.port | default .Values.s3.port }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
@@ -32,13 +37,12 @@ spec:
backend:
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
service:
name: {{ template "seaweedfs.name" . }}-s3
name: {{ $serviceName }}
port:
number: {{ .Values.s3.port }}
#name:
number: {{ $s3Port }}
{{- else }}
serviceName: {{ template "seaweedfs.name" . }}-s3
servicePort: {{ .Values.s3.port }}
serviceName: {{ $serviceName }}
servicePort: {{ $s3Port }}
{{- end }}
{{- if .Values.s3.ingress.host }}
host: {{ .Values.s3.ingress.host | quote }}

View File

@@ -1,6 +1,32 @@
{{- if .Values.master.enabled }}
{{- if .Values.filer.s3.enabled }}
{{- if .Values.filer.s3.createBuckets }}
{{- /* Support bucket creation for both standalone filer.s3 and allInOne modes */}}
{{- $createBuckets := list }}
{{- $s3Enabled := false }}
{{- $enableAuth := false }}
{{- $existingConfigSecret := "" }}
{{- /* Check allInOne mode first */}}
{{- if .Values.allInOne.enabled }}
{{- if .Values.allInOne.s3.enabled }}
{{- $s3Enabled = true }}
{{- if .Values.allInOne.s3.createBuckets }}
{{- $createBuckets = .Values.allInOne.s3.createBuckets }}
{{- end }}
{{- $enableAuth = or .Values.allInOne.s3.enableAuth .Values.s3.enableAuth .Values.filer.s3.enableAuth }}
{{- $existingConfigSecret = or .Values.allInOne.s3.existingConfigSecret .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret }}
{{- end }}
{{- else if .Values.master.enabled }}
{{- /* Check standalone filer.s3 mode */}}
{{- if .Values.filer.s3.enabled }}
{{- $s3Enabled = true }}
{{- if .Values.filer.s3.createBuckets }}
{{- $createBuckets = .Values.filer.s3.createBuckets }}
{{- end }}
{{- $enableAuth = .Values.filer.s3.enableAuth }}
{{- $existingConfigSecret = .Values.filer.s3.existingConfigSecret }}
{{- end }}
{{- end }}
{{- if and $s3Enabled $createBuckets }}
---
apiVersion: batch/v1
kind: Job
@@ -32,9 +58,9 @@ spec:
- name: WEED_CLUSTER_DEFAULT
value: "sw"
- name: WEED_CLUSTER_SW_MASTER
value: "{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}:{{ .Values.master.port }}"
value: {{ include "seaweedfs.cluster.masterAddress" . | quote }}
- name: WEED_CLUSTER_SW_FILER
value: "{{ template "seaweedfs.name" . }}-filer-client.{{ .Release.Namespace }}:{{ .Values.filer.port }}"
value: {{ include "seaweedfs.cluster.filerAddress" . | quote }}
- name: POD_IP
valueFrom:
fieldRef:
@@ -71,24 +97,29 @@ spec:
echo "Service at $url failed to become ready within 5 minutes"
exit 1
}
{{- if .Values.allInOne.enabled }}
wait_for_service "http://$WEED_CLUSTER_SW_MASTER{{ .Values.allInOne.readinessProbe.httpGet.path }}"
wait_for_service "http://$WEED_CLUSTER_SW_FILER{{ .Values.filer.readinessProbe.httpGet.path }}"
{{- else }}
wait_for_service "http://$WEED_CLUSTER_SW_MASTER{{ .Values.master.readinessProbe.httpGet.path }}"
wait_for_service "http://$WEED_CLUSTER_SW_FILER{{ .Values.filer.readinessProbe.httpGet.path }}"
{{- range $reg, $props := $.Values.filer.s3.createBuckets }}
exec /bin/echo \
"s3.bucket.create --name {{ $props.name }}" |\
{{- end }}
{{- range $createBuckets }}
/bin/echo \
"s3.bucket.create --name {{ .name }}" |\
/usr/bin/weed shell
{{- end }}
{{- range $reg, $props := $.Values.filer.s3.createBuckets }}
{{- if $props.anonymousRead }}
exec /bin/echo \
{{- range $createBuckets }}
{{- if .anonymousRead }}
/bin/echo \
"s3.configure --user anonymous \
--buckets {{ $props.name }} \
--buckets {{ .name }} \
--actions Read \
--apply true" |\
/usr/bin/weed shell
{{- end }}
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
{{- if $enableAuth }}
volumeMounts:
- name: config-users
mountPath: /etc/sw
@@ -106,17 +137,15 @@ spec:
{{- if .Values.filer.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.filer.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.filer.s3.enableAuth }}
{{- if $enableAuth }}
volumes:
- name: config-users
secret:
defaultMode: 420
{{- if not (empty .Values.filer.s3.existingConfigSecret) }}
secretName: {{ .Values.filer.s3.existingConfigSecret }}
{{- if $existingConfigSecret }}
secretName: {{ $existingConfigSecret }}
{{- else }}
secretName: seaweedfs-s3-secret
secretName: {{ template "seaweedfs.name" . }}-s3-secret
{{- end }}
{{- end }}{{/** if .Values.filer.s3.enableAuth **/}}
{{- end }}{{/** if .Values.master.enabled **/}}
{{- end }}{{/** if .Values.filer.s3.enabled **/}}
{{- end }}{{/** if .Values.filer.s3.createBuckets **/}}
{{- end }}
{{- end }}

View File

@@ -251,7 +251,7 @@ spec:
httpGet:
path: {{ $volume.readinessProbe.httpGet.path }}
port: {{ $volume.port }}
scheme: {{ $volume.readinessProbe.scheme }}
scheme: {{ $volume.readinessProbe.httpGet.scheme }}
initialDelaySeconds: {{ $volume.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ $volume.readinessProbe.periodSeconds }}
successThreshold: {{ $volume.readinessProbe.successThreshold }}
@@ -263,7 +263,7 @@ spec:
httpGet:
path: {{ $volume.livenessProbe.httpGet.path }}
port: {{ $volume.port }}
scheme: {{ $volume.livenessProbe.scheme }}
scheme: {{ $volume.livenessProbe.httpGet.scheme }}
initialDelaySeconds: {{ $volume.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ $volume.livenessProbe.periodSeconds }}
successThreshold: {{ $volume.livenessProbe.successThreshold }}

View File

@@ -22,6 +22,8 @@ global:
serviceAccountName: "seaweedfs"
automountServiceAccountToken: true
certificates:
duration: 87600h
renewBefore: 720h
alphacrds: false
monitoring:
enabled: false
@@ -235,27 +237,27 @@ master:
ingress:
enabled: false
className: "nginx"
className: ""
# host: false for "*" hostname
host: "master.seaweedfs.local"
path: "/sw-master/?(.*)"
pathType: ImplementationSpecific
annotations:
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
annotations: {}
# nginx.ingress.kubernetes.io/auth-type: "basic"
# nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
# nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master'
# nginx.ingress.kubernetes.io/service-upstream: "true"
# nginx.ingress.kubernetes.io/rewrite-target: /$1
# nginx.ingress.kubernetes.io/use-regex: "true"
# nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
# nginx.ingress.kubernetes.io/ssl-redirect: "false"
# nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
# nginx.ingress.kubernetes.io/configuration-snippet: |
# sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url
# sub_filter '="/' '="./'; #make absolute paths to relative
# sub_filter '=/' '=./';
# sub_filter '/seaweedfsstatic' './seaweedfsstatic';
# sub_filter_once off;
tls: []
extraEnvironmentVars:
@@ -308,7 +310,7 @@ volume:
# limit file size to avoid out of memory, default 256mb
fileSizeLimitMB: null
# minimum free disk space(in percents). If free disk space lower this value - all volumes marks as ReadOnly
minFreeSpacePercent: 7
minFreeSpacePercent: 1
# Custom command line arguments to add to the volume command
# Example to fix IPv6 metrics connectivity issues:
@@ -769,28 +771,28 @@ filer:
ingress:
enabled: false
className: "nginx"
className: ""
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
path: "/sw-filer/?(.*)"
pathType: ImplementationSpecific
annotations:
nginx.ingress.kubernetes.io/backend-protocol: GRPC
nginx.ingress.kubernetes.io/auth-type: "basic"
nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
nginx.ingress.kubernetes.io/service-upstream: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
sub_filter '="/' '="./'; #make absolute paths to relative
sub_filter '=/' '=./';
sub_filter '/seaweedfsstatic' './seaweedfsstatic';
sub_filter_once off;
annotations: {}
# nginx.ingress.kubernetes.io/backend-protocol: GRPC
# nginx.ingress.kubernetes.io/auth-type: "basic"
# nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret"
# nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer'
# nginx.ingress.kubernetes.io/service-upstream: "true"
# nginx.ingress.kubernetes.io/rewrite-target: /$1
# nginx.ingress.kubernetes.io/use-regex: "true"
# nginx.ingress.kubernetes.io/enable-rewrite-log: "true"
# nginx.ingress.kubernetes.io/ssl-redirect: "false"
# nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
# nginx.ingress.kubernetes.io/configuration-snippet: |
# sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url
# sub_filter '="/' '="./'; #make absolute paths to relative
# sub_filter '=/' '=./';
# sub_filter '/seaweedfsstatic' './seaweedfsstatic';
# sub_filter_once off;
# extraEnvVars is a list of extra environment variables to set with the stateful set.
extraEnvironmentVars:
@@ -854,8 +856,6 @@ filer:
port: 8333
# add additional https port
httpsPort: 0
# allow empty folders
allowEmptyFolder: false
# Suffix of the host name, {bucket}.{domainName}
domainName: ""
# enable user & permission to s3 (need to inject to all services)
@@ -873,7 +873,7 @@ filer:
# anonymousRead: false
s3:
enabled: true
enabled: false
imageOverride: null
restartPolicy: null
replicas: 1
@@ -883,8 +883,6 @@ s3:
httpsPort: 0
metricsPort: 9327
loggingOverrideLevel: null
# allow empty folders
allowEmptyFolder: true
# enable user & permission to s3 (need to inject to all services)
enableAuth: false
# set to the name of an existing kubernetes Secret with the s3 json config file
@@ -977,9 +975,9 @@ s3:
extraEnvironmentVars:
# Custom command line arguments to add to the s3 command
# Example to fix connection idle seconds:
extraArgs: ["-idleTimeout=30"]
# extraArgs: []
# Default idleTimeout is 120 seconds. Example to customize:
# extraArgs: ["-idleTimeout=300"]
extraArgs: []
# used to configure livenessProbe on s3 containers
#
@@ -1009,7 +1007,7 @@ s3:
ingress:
enabled: false
className: "nginx"
className: ""
# host: false for "*" hostname
host: "seaweedfs.cluster.local"
path: "/"
@@ -1095,6 +1093,7 @@ allInOne:
enabled: false
imageOverride: null
restartPolicy: Always
replicas: 1 # Number of replicas (note: multiple replicas may require shared storage)
# Core configuration
idleTimeout: 30 # Connection idle seconds
@@ -1106,24 +1105,85 @@ allInOne:
metricsIp: "" # Metrics listen IP. If empty, defaults to bindAddress
loggingOverrideLevel: null # Override logging level
# Service configuration
# Custom command line arguments to add to the server command
# Example to fix IPv6 metrics connectivity issues:
# extraArgs: ["-metricsIp", "0.0.0.0"]
# Example with multiple args:
# extraArgs: ["-customFlag", "value", "-anotherFlag"]
extraArgs: []
# Update strategy configuration
# type: Recreate or RollingUpdate
# For single replica, Recreate is recommended to avoid data conflicts.
# For multiple replicas with RollingUpdate, you MUST use shared storage
# (e.g., data.type: persistentVolumeClaim with ReadWriteMany access mode)
# to avoid data loss or inconsistency between pods.
updateStrategy:
type: Recreate
# S3 gateway configuration
# Note: Most parameters below default to null, which means they inherit from
# the global s3.* settings. Set explicit values here to override for allInOne only.
s3:
enabled: false # Whether to enable S3 gateway
port: null # S3 gateway port (null inherits from s3.port)
httpsPort: null # S3 gateway HTTPS port (null inherits from s3.httpsPort)
domainName: null # Suffix of the host name (null inherits from s3.domainName)
enableAuth: false # Enable user & permission to S3
# Set to the name of an existing kubernetes Secret with the s3 json config file
# should have a secret key called seaweedfs_s3_config with an inline json config
existingConfigSecret: null
auditLogConfig: null # S3 audit log configuration (null inherits from s3.auditLogConfig)
# You may specify buckets to be created during the install process.
# Buckets may be exposed publicly by setting `anonymousRead` to `true`
# createBuckets:
# - name: bucket-a
# anonymousRead: true
# - name: bucket-b
# anonymousRead: false
# SFTP server configuration
# Note: Most parameters below default to null, which means they inherit from
# the global sftp.* settings. Set explicit values here to override for allInOne only.
sftp:
enabled: false # Whether to enable SFTP server
port: null # SFTP port (null inherits from sftp.port)
sshPrivateKey: null # Path to SSH private key (null inherits from sftp.sshPrivateKey)
hostKeysFolder: null # Path to SSH host keys folder (null inherits from sftp.hostKeysFolder)
authMethods: null # Comma-separated auth methods (null inherits from sftp.authMethods)
maxAuthTries: null # Maximum authentication attempts (null inherits from sftp.maxAuthTries)
bannerMessage: null # Banner message (null inherits from sftp.bannerMessage)
loginGraceTime: null # Login grace time (null inherits from sftp.loginGraceTime)
clientAliveInterval: null # Client keep-alive interval (null inherits from sftp.clientAliveInterval)
clientAliveCountMax: null # Maximum missed keep-alive messages (null inherits from sftp.clientAliveCountMax)
enableAuth: false # Enable SFTP authentication
# Set to the name of an existing kubernetes Secret with the sftp json config file
existingConfigSecret: null
# Set to the name of an existing kubernetes Secret with the SSH keys
existingSshConfigSecret: null
# Service settings
service:
annotations: {} # Annotations for the service
type: ClusterIP # Service type (ClusterIP, NodePort, LoadBalancer)
internalTrafficPolicy: Cluster # Internal traffic policy
# Note: For ingress in all-in-one mode, use the standard s3.ingress and
# filer.ingress settings. The templates automatically detect all-in-one mode
# and point to the correct service (seaweedfs-all-in-one instead of
# seaweedfs-s3 or seaweedfs-filer).
# Storage configuration
data:
type: "emptyDir" # Options: "hostPath", "persistentVolumeClaim", "emptyDir"
type: "emptyDir" # Options: "hostPath", "persistentVolumeClaim", "emptyDir", "existingClaim"
hostPathPrefix: /mnt/data # Path prefix for hostPath volumes
claimName: seaweedfs-data-pvc # Name of the PVC to use
size: "" # Size of the PVC
storageClass: "" # Storage class for the PVC
claimName: seaweedfs-data-pvc # Name of the PVC to use (for existingClaim type)
size: null # Size of the PVC (null defaults to 10Gi for persistentVolumeClaim type)
storageClass: null # Storage class for the PVC (null uses cluster default)
# accessModes for the PVC. Default is ["ReadWriteOnce"].
# For multi-replica deployments, use ["ReadWriteMany"] with a compatible storage class.
accessModes: []
annotations: {} # Annotations for the PVC
# Health checks
readinessProbe:
@@ -1131,7 +1191,7 @@ allInOne:
httpGet:
path: /cluster/status
port: 9333
scheme: HTTP
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 15
successThreshold: 1
@@ -1143,7 +1203,7 @@ allInOne:
httpGet:
path: /cluster/status
port: 9333
scheme: HTTP
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 30
successThreshold: 1
@@ -1152,6 +1212,18 @@ allInOne:
# Additional resources
extraEnvironmentVars: {} # Additional environment variables
# Secret environment variables (for database credentials, etc.)
# Example:
# secretExtraEnvironmentVars:
# WEED_POSTGRES_USERNAME:
# secretKeyRef:
# name: postgres-credentials
# key: username
# WEED_POSTGRES_PASSWORD:
# secretKeyRef:
# name: postgres-credentials
# key: password
secretExtraEnvironmentVars: {}
extraVolumeMounts: "" # Additional volume mounts
extraVolumes: "" # Additional volumes
initContainers: "" # Init containers
@@ -1171,7 +1243,7 @@ allInOne:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
app.kubernetes.io/component: seaweedfs-all-in-one
topologyKey: kubernetes.io/hostname
# Topology Spread Constraints Settings
@@ -1179,16 +1251,16 @@ allInOne:
# for a PodSpec. By Default no constraints are set.
topologySpreadConstraints: ""
# Toleration Settings for master pods
# Toleration Settings for pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: ""
# nodeSelector labels for master pod assignment, formatted as a muli-line string.
# nodeSelector labels for pod assignment, formatted as a muli-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: ""
# Used to assign priority to master pods
# Used to assign priority to pods
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
@@ -1268,6 +1340,9 @@ certificates:
keySize: 2048
duration: 2160h # 90d
renewBefore: 360h # 15d
ca:
duration: 87600h # 10 years
renewBefore: 720h # 30d
externalCertificates:
# This will avoid the need to use cert-manager and will rely on providing your own external certificates and CA
# you will need to store your provided certificates in the secret read by the different services:

View File

@@ -1,2 +1,2 @@
ARG VERSION=3.99
ARG VERSION=4.02
FROM chrislusf/seaweedfs:${VERSION}

View File

@@ -1,13 +0,0 @@
diff --git a/packages/system/seaweedfs/charts/seaweedfs/templates/cert/ca-cert.yaml b/packages/system/seaweedfs/charts/seaweedfs/templates/cert/ca-cert.yaml
index 0fd6615e..f2572558 100644
--- a/packages/system/seaweedfs/charts/seaweedfs/templates/cert/ca-cert.yaml
+++ b/packages/system/seaweedfs/charts/seaweedfs/templates/cert/ca-cert.yaml
@@ -13,6 +13,8 @@ spec:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
commonName: "{{ template "seaweedfs.name" . }}-root-ca"
isCA: true
+ duration: 87600h
+ renewBefore: 720h
issuerRef:
name: {{ template "seaweedfs.name" . }}-issuer
kind: Issuer

View File

@@ -124,7 +124,7 @@ seaweedfs:
bucketClassName: "seaweedfs"
region: ""
sidecar:
image: "ghcr.io/cozystack/cozystack/objectstorage-sidecar:v0.38.2@sha256:ff3281fe53a97d2cd5cd94bd4c4d8ff08189508729869bb39b3f60c80da5f919"
image: "ghcr.io/cozystack/cozystack/objectstorage-sidecar:v0.38.8@sha256:2d1833c78c35b697a3634d4b3be9a3218edae95a77583e9e121c10a92e7433ec"
certificates:
commonName: "SeaweedFS CA"
ipAddresses: []

View File

@@ -28,7 +28,6 @@ import (
helmv2 "github.com/fluxcd/helm-controller/api/v2"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
fields "k8s.io/apimachinery/pkg/fields"
labels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@@ -142,17 +141,9 @@ func (r *REST) GetSingularName() string {
// Create handles the creation of a new Application by converting it to a HelmRelease
func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
// Assert the object is of type Application
us, ok := obj.(*unstructured.Unstructured)
app, ok := obj.(*appsv1alpha1.Application)
if !ok {
return nil, fmt.Errorf("expected unstructured.Unstructured object, got %T", obj)
}
app := &appsv1alpha1.Application{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(us.Object, app); err != nil {
errMsg := fmt.Sprintf("returned unstructured.Unstructured object was not an Application")
klog.Errorf(errMsg)
return nil, fmt.Errorf(errMsg)
return nil, fmt.Errorf("expected *appsv1alpha1.Application object, got %T", obj)
}
// Convert Application to HelmRelease
@@ -186,15 +177,8 @@ func (r *REST) Create(ctx context.Context, obj runtime.Object, createValidation
klog.V(6).Infof("Successfully created and converted HelmRelease %s to Application", helmRelease.GetName())
// Convert Application to unstructured format
unstructuredApp, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&convertedApp)
if err != nil {
klog.Errorf("Failed to convert Application to unstructured for resource %s: %v", convertedApp.GetName(), err)
return nil, fmt.Errorf("failed to convert Application to unstructured: %v", err)
}
klog.V(6).Infof("Successfully retrieved and converted resource %s of type %s to unstructured", convertedApp.GetName(), r.gvr.Resource)
return &unstructured.Unstructured{Object: unstructuredApp}, nil
klog.V(6).Infof("Successfully retrieved and converted resource %s of type %s", convertedApp.GetName(), r.gvr.Resource)
return &convertedApp, nil
}
// Get retrieves an Application by converting the corresponding HelmRelease
@@ -238,25 +222,8 @@ func (r *REST) Get(ctx context.Context, name string, options *metav1.GetOptions)
return nil, fmt.Errorf("conversion error: %v", err)
}
// Explicitly set apiVersion and kind for Application
convertedApp.TypeMeta = metav1.TypeMeta{
APIVersion: "apps.cozystack.io/v1alpha1",
Kind: r.kindName,
}
// Convert Application to unstructured format
unstructuredApp, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&convertedApp)
if err != nil {
klog.Errorf("Failed to convert Application to unstructured for resource %s: %v", name, err)
return nil, fmt.Errorf("failed to convert Application to unstructured: %v", err)
}
// Explicitly set apiVersion and kind in unstructured object
unstructuredApp["apiVersion"] = "apps.cozystack.io/v1alpha1"
unstructuredApp["kind"] = r.kindName
klog.V(6).Infof("Successfully retrieved and converted resource %s of kind %s to unstructured", name, r.gvr.Resource)
return &unstructured.Unstructured{Object: unstructuredApp}, nil
klog.V(6).Infof("Successfully retrieved and converted resource %s of kind %s", name, r.gvr.Resource)
return &convertedApp, nil
}
// List retrieves a list of Applications by converting HelmReleases
@@ -339,8 +306,8 @@ func (r *REST) List(ctx context.Context, options *metainternalversion.ListOption
return nil, err
}
// Initialize unstructured items array
items := make([]unstructured.Unstructured, 0)
// Initialize Application items array
items := make([]appsv1alpha1.Application, 0, len(hrList.Items))
// Iterate over HelmReleases and convert to Applications
for i := range hrList.Items {
@@ -387,17 +354,11 @@ func (r *REST) List(ctx context.Context, options *metainternalversion.ListOption
}
}
// Convert Application to unstructured
unstructuredApp, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&app)
if err != nil {
klog.Errorf("Error converting Application %s to unstructured: %v", app.Name, err)
continue
}
items = append(items, unstructured.Unstructured{Object: unstructuredApp})
items = append(items, app)
}
// Explicitly set apiVersion and kind in unstructured object
appList := r.NewList().(*unstructured.UnstructuredList)
// Create ApplicationList with proper kind
appList := r.NewList().(*appsv1alpha1.ApplicationList)
appList.SetResourceVersion(hrList.GetResourceVersion())
appList.Items = items
@@ -447,16 +408,9 @@ func (r *REST) Update(ctx context.Context, name string, objInfo rest.UpdatedObje
}
// Assert the new object is of type Application
us, ok := newObj.(*unstructured.Unstructured)
app, ok := newObj.(*appsv1alpha1.Application)
if !ok {
errMsg := fmt.Sprintf("expected unstructured.Unstructured object, got %T", newObj)
klog.Errorf(errMsg)
return nil, false, fmt.Errorf(errMsg)
}
app := &appsv1alpha1.Application{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(us.Object, app); err != nil {
errMsg := fmt.Sprintf("returned unstructured.Unstructured object was not an Application")
errMsg := fmt.Sprintf("expected *appsv1alpha1.Application object, got %T", newObj)
klog.Errorf(errMsg)
return nil, false, fmt.Errorf(errMsg)
}
@@ -517,24 +471,9 @@ func (r *REST) Update(ctx context.Context, name string, objInfo rest.UpdatedObje
klog.V(6).Infof("Successfully updated and converted HelmRelease %s to Application", helmRelease.GetName())
// Explicitly set apiVersion and kind for Application
convertedApp.TypeMeta = metav1.TypeMeta{
APIVersion: "apps.cozystack.io/v1alpha1",
Kind: r.kindName,
}
klog.V(6).Infof("Returning updated Application object: %+v", convertedApp)
// Convert Application to unstructured format
unstructuredApp, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&convertedApp)
if err != nil {
klog.Errorf("Failed to convert Application to unstructured for resource %s: %v", convertedApp.GetName(), err)
return nil, false, fmt.Errorf("failed to convert Application to unstructured: %v", err)
}
obj := &unstructured.Unstructured{Object: unstructuredApp}
obj.SetGroupVersionKind(r.gvk)
klog.V(6).Infof("Returning patched Application object: %+v", unstructuredApp)
return obj, false, nil
return &convertedApp, false, nil
}
// Delete removes an Application by deleting the corresponding HelmRelease
@@ -728,19 +667,10 @@ func (r *REST) Watch(ctx context.Context, options *metainternalversion.ListOptio
}
}
// Convert Application to unstructured
unstructuredApp, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&app)
if err != nil {
klog.Errorf("Failed to convert Application to unstructured: %v", err)
continue
}
obj := &unstructured.Unstructured{Object: unstructuredApp}
obj.SetGroupVersionKind(r.gvk)
// Create watch event with Application object
appEvent := watch.Event{
Type: event.Type,
Object: obj,
Object: &app,
}
// Send event to custom watcher
@@ -766,8 +696,8 @@ func (r *REST) Watch(ctx context.Context, options *metainternalversion.ListOptio
// Helper function to get HelmRelease name from object
func helmReleaseName(obj runtime.Object) string {
if u, ok := obj.(*unstructured.Unstructured); ok {
return u.GetName()
if app, ok := obj.(*appsv1alpha1.Application); ok {
return app.GetName()
}
return "<unknown>"
}
@@ -1059,56 +989,6 @@ func (r *REST) ConvertToTable(ctx context.Context, object runtime.Object, tableO
case *appsv1alpha1.Application:
table = r.buildTableFromApplication(*obj)
table.ListMeta.ResourceVersion = obj.GetResourceVersion()
case *unstructured.UnstructuredList:
apps := make([]appsv1alpha1.Application, 0, len(obj.Items))
for _, u := range obj.Items {
var a appsv1alpha1.Application
err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &a)
if err != nil {
klog.Errorf("Failed to convert Unstructured to Application: %v", err)
continue
}
apps = append(apps, a)
}
table = r.buildTableFromApplications(apps)
table.ListMeta.ResourceVersion = obj.GetResourceVersion()
case *unstructured.Unstructured:
var apps []appsv1alpha1.Application
for {
var items interface{}
var ok bool
var objects []unstructured.Unstructured
if items, ok = obj.Object["items"]; !ok {
break
}
if objects, ok = items.([]unstructured.Unstructured); !ok {
break
}
apps = make([]appsv1alpha1.Application, 0, len(objects))
var a appsv1alpha1.Application
for i := range objects {
err := runtime.DefaultUnstructuredConverter.FromUnstructured(objects[i].Object, &a)
if err != nil {
klog.Errorf("Failed to convert Unstructured to Application: %v", err)
continue
}
apps = append(apps, a)
}
break
}
if apps != nil {
table = r.buildTableFromApplications(apps)
table.ListMeta.ResourceVersion = obj.GetResourceVersion()
break
}
var app appsv1alpha1.Application
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &app)
if err != nil {
klog.Errorf("Failed to convert Unstructured to Application: %v", err)
return nil, fmt.Errorf("failed to convert Unstructured to Application: %v", err)
}
table = r.buildTableFromApplication(app)
table.ListMeta.ResourceVersion = obj.GetResourceVersion()
default:
resource := schema.GroupResource{}
if info, ok := request.RequestInfoFrom(ctx); ok {
@@ -1147,10 +1027,11 @@ func (r *REST) buildTableFromApplications(apps []appsv1alpha1.Application) metav
}
now := time.Now()
for _, app := range apps {
for i := range apps {
app := &apps[i]
row := metav1.TableRow{
Cells: []interface{}{app.GetName(), getReadyStatus(app.Status.Conditions), computeAge(app.GetCreationTimestamp().Time, now), getVersion(app.Status.Version)},
Object: runtime.RawExtension{Object: &app},
Object: runtime.RawExtension{Object: app},
}
table.Rows = append(table.Rows, row)
}
@@ -1171,9 +1052,10 @@ func (r *REST) buildTableFromApplication(app appsv1alpha1.Application) metav1.Ta
}
now := time.Now()
a := app
row := metav1.TableRow{
Cells: []interface{}{app.GetName(), getReadyStatus(app.Status.Conditions), computeAge(app.GetCreationTimestamp().Time, now), getVersion(app.Status.Version)},
Object: runtime.RawExtension{Object: &app},
Object: runtime.RawExtension{Object: &a},
}
table.Rows = append(table.Rows, row)
@@ -1237,15 +1119,21 @@ func (r *REST) Destroy() {
// New creates a new instance of Application
func (r *REST) New() runtime.Object {
obj := &unstructured.Unstructured{}
obj.SetGroupVersionKind(r.gvk)
obj := &appsv1alpha1.Application{}
obj.TypeMeta = metav1.TypeMeta{
APIVersion: r.gvk.GroupVersion().String(),
Kind: r.kindName,
}
return obj
}
// NewList returns an empty list of Application objects
func (r *REST) NewList() runtime.Object {
obj := &unstructured.UnstructuredList{}
obj.SetGroupVersionKind(r.gvk.GroupVersion().WithKind(r.kindName + "List"))
obj := &appsv1alpha1.ApplicationList{}
obj.TypeMeta = metav1.TypeMeta{
APIVersion: r.gvk.GroupVersion().String(),
Kind: r.kindName + "List",
}
return obj
}

View File

@@ -26,7 +26,6 @@ import (
helmv2 "github.com/fluxcd/helm-controller/api/v2"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
fields "k8s.io/apimachinery/pkg/fields"
labels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@@ -147,25 +146,8 @@ func (r *REST) Get(ctx context.Context, name string, options *metav1.GetOptions)
return nil, fmt.Errorf("conversion error: %v", err)
}
// Explicitly set apiVersion and kind for TenantModule
convertedModule.TypeMeta = metav1.TypeMeta{
APIVersion: "core.cozystack.io/v1alpha1",
Kind: r.kindName,
}
// Convert TenantModule to unstructured format
unstructuredModule, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&convertedModule)
if err != nil {
klog.Errorf("Failed to convert TenantModule to unstructured for resource %s: %v", name, err)
return nil, fmt.Errorf("failed to convert TenantModule to unstructured: %v", err)
}
// Explicitly set apiVersion and kind in unstructured object
unstructuredModule["apiVersion"] = "core.cozystack.io/v1alpha1"
unstructuredModule["kind"] = r.kindName
klog.V(6).Infof("Successfully retrieved and converted resource %s of kind %s to unstructured", name, r.gvr.Resource)
return &unstructured.Unstructured{Object: unstructuredModule}, nil
klog.V(6).Infof("Successfully retrieved and converted resource %s of kind %s", name, r.gvr.Resource)
return &convertedModule, nil
}
// List retrieves a list of TenantModules by converting HelmReleases
@@ -245,8 +227,8 @@ func (r *REST) List(ctx context.Context, options *metainternalversion.ListOption
return nil, err
}
// Initialize unstructured items array
items := make([]unstructured.Unstructured, 0)
// Initialize TenantModule items array
items := make([]corev1alpha1.TenantModule, 0, len(hrList.Items))
// Iterate over HelmReleases and convert to TenantModules
for i := range hrList.Items {
@@ -294,19 +276,15 @@ func (r *REST) List(ctx context.Context, options *metainternalversion.ListOption
}
}
// Convert TenantModule to unstructured
unstructuredModule, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&module)
if err != nil {
klog.Errorf("Error converting TenantModule %s to unstructured: %v", module.Name, err)
continue
}
items = append(items, unstructured.Unstructured{Object: unstructuredModule})
items = append(items, module)
}
// Explicitly set apiVersion and kind in unstructured object
moduleList := &unstructured.UnstructuredList{}
moduleList.SetAPIVersion("core.cozystack.io/v1alpha1")
moduleList.SetKind(r.kindName + "List")
// Create TenantModuleList with proper kind
moduleList := &corev1alpha1.TenantModuleList{}
moduleList.TypeMeta = metav1.TypeMeta{
APIVersion: "core.cozystack.io/v1alpha1",
Kind: r.kindName + "List",
}
moduleList.SetResourceVersion(hrList.GetResourceVersion())
moduleList.Items = items
@@ -455,17 +433,10 @@ func (r *REST) Watch(ctx context.Context, options *metainternalversion.ListOptio
}
}
// Convert TenantModule to unstructured
unstructuredModule, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&module)
if err != nil {
klog.Errorf("Failed to convert TenantModule to unstructured: %v", err)
continue
}
// Create watch event with TenantModule object
moduleEvent := watch.Event{
Type: event.Type,
Object: &unstructured.Unstructured{Object: unstructuredModule},
Object: &module,
}
// Send event to custom watcher
@@ -620,27 +591,11 @@ func (r *REST) ConvertToTable(ctx context.Context, object runtime.Object, tableO
var table metav1.Table
switch obj := object.(type) {
case *unstructured.UnstructuredList:
modules := make([]corev1alpha1.TenantModule, 0, len(obj.Items))
for _, u := range obj.Items {
var m corev1alpha1.TenantModule
err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &m)
if err != nil {
klog.Errorf("Failed to convert Unstructured to TenantModule: %v", err)
continue
}
modules = append(modules, m)
}
table = r.buildTableFromTenantModules(modules)
table.ListMeta.ResourceVersion = obj.GetResourceVersion()
case *unstructured.Unstructured:
var module corev1alpha1.TenantModule
err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &module)
if err != nil {
klog.Errorf("Failed to convert Unstructured to TenantModule: %v", err)
return nil, fmt.Errorf("failed to convert Unstructured to TenantModule: %v", err)
}
table = r.buildTableFromTenantModule(module)
case *corev1alpha1.TenantModuleList:
table = r.buildTableFromTenantModules(obj.Items)
table.ListMeta.ResourceVersion = obj.ListMeta.ResourceVersion
case *corev1alpha1.TenantModule:
table = r.buildTableFromTenantModule(*obj)
table.ListMeta.ResourceVersion = obj.GetResourceVersion()
default:
resource := schema.GroupResource{}
@@ -680,10 +635,11 @@ func (r *REST) buildTableFromTenantModules(modules []corev1alpha1.TenantModule)
}
now := time.Now()
for _, module := range modules {
for i := range modules {
module := &modules[i]
row := metav1.TableRow{
Cells: []interface{}{module.GetName(), getReadyStatus(module.Status.Conditions), computeAge(module.GetCreationTimestamp().Time, now), getVersion(module.Status.Version)},
Object: runtime.RawExtension{Object: &module},
Object: runtime.RawExtension{Object: module},
}
table.Rows = append(table.Rows, row)
}
@@ -704,9 +660,10 @@ func (r *REST) buildTableFromTenantModule(module corev1alpha1.TenantModule) meta
}
now := time.Now()
m := module
row := metav1.TableRow{
Cells: []interface{}{module.GetName(), getReadyStatus(module.Status.Conditions), computeAge(module.GetCreationTimestamp().Time, now), getVersion(module.Status.Version)},
Object: runtime.RawExtension{Object: &module},
Object: runtime.RawExtension{Object: &m},
}
table.Rows = append(table.Rows, row)
@@ -751,12 +708,22 @@ func (r *REST) Destroy() {
// New creates a new instance of TenantModule
func (r *REST) New() runtime.Object {
return &corev1alpha1.TenantModule{}
obj := &corev1alpha1.TenantModule{}
obj.TypeMeta = metav1.TypeMeta{
APIVersion: r.gvk.GroupVersion().String(),
Kind: r.kindName,
}
return obj
}
// NewList returns an empty list of TenantModule objects
func (r *REST) NewList() runtime.Object {
return &corev1alpha1.TenantModuleList{}
obj := &corev1alpha1.TenantModuleList{}
obj.TypeMeta = metav1.TypeMeta{
APIVersion: r.gvk.GroupVersion().String(),
Kind: r.kindName + "List",
}
return obj
}
// Kind returns the resource kind used for API discovery