Compare commits

..

4 Commits

Author SHA1 Message Date
Andrei Kvapil
725f70399f refactor: remove assets-server and switch to cozystack-operator
- Remove cozystack-assets-server and related manifests
- Move grafana dashboards to dedicated pod in grafana-operator
- Remove old installer (cozystack.yaml) and switch to cozystack-operator
- Remove cozystackOperator.enabled flag (operator is now always used)
- Remove obsolete platform templates (helmrepos, helmreleases, apps, namespaces)
- Update Makefile to build cozystack-operator and cozystack-packages
- Remove installer.sh script (replaced by operator)

Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
2026-01-06 17:42:29 +01:00
Andrei Kvapil
49f7ac9192 refactor: move migrations to platform chart
- Move migrations from scripts/migrations/ to packages/core/platform/images/migrations/
- Create migration-hook.yaml template to run migrations as Helm pre-upgrade/pre-install hook
- Add Dockerfile and run-migrations.sh for migrations image
- Remove old cozystack-assets image directory
- Update platform Makefile to build migrations image instead of assets

Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
2026-01-06 17:40:56 +01:00
Andrei Kvapil
ab8cc5ffd4 refactor: update CozystackResourceDefinition to use chartRef instead of chart
This commit replaces the `chart` field with `chartRef` in CozystackResourceDefinition
to support direct OCIRepository references instead of HelmRepository lookups.

Changes:
- Update API types to use ChartRef structure with Kind, Name, Namespace fields
- Modify HelmRelease reconciler to set spec.chartRef instead of spec.chart
- Update config and registry to use new ChartRef configuration
- Add backward compatibility in lineage mapper for both chartRef and legacy chart
- Update all CozystackResourceDefinition YAML files to use new format
- Regenerate CRDs and deepcopy functions

Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
2026-01-06 17:39:08 +01:00
Andrei Kvapil
f5e6107e3a feat(api): show only hash in version column for applications and modules
Fix getVersion to parse "0.1.4+abcdef" format (with "+" separator)
instead of incorrectly looking for "sha256:" prefix.

Co-Authored-By: Claude <noreply@anthropic.com>
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
2026-01-06 17:38:17 +01:00
634 changed files with 17494 additions and 50474 deletions

2
.github/CODEOWNERS vendored
View File

@@ -1 +1 @@
* @kvaps @lllamnyp @lexfrei @androndo
* @kvaps @lllamnyp @nbykov0

View File

@@ -71,17 +71,11 @@ jobs:
name: pr-patch
path: _out/assets/pr.patch
- name: Upload CRDs
- name: Upload installer
uses: actions/upload-artifact@v4
with:
name: cozystack-crds
path: _out/assets/cozystack-crds.yaml
- name: Upload operator
uses: actions/upload-artifact@v4
with:
name: cozystack-operator
path: _out/assets/cozystack-operator.yaml
name: cozystack-installer
path: _out/assets/cozystack-installer.yaml
- name: Upload Talos image
uses: actions/upload-artifact@v4
@@ -94,9 +88,8 @@ jobs:
runs-on: ubuntu-latest
if: contains(github.event.pull_request.labels.*.name, 'release')
outputs:
crds_id: ${{ steps.fetch_assets.outputs.crds_id }}
operator_id: ${{ steps.fetch_assets.outputs.operator_id }}
disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
steps:
- name: Checkout code
@@ -139,22 +132,19 @@ jobs:
return;
}
const find = (n) => draft.assets.find(a => a.name === n)?.id;
const crdsId = find('cozystack-crds.yaml');
const operatorId = find('cozystack-operator.yaml');
const diskId = find('nocloud-amd64.raw.xz');
if (!crdsId || !operatorId || !diskId) {
const installerId = find('cozystack-installer.yaml');
const diskId = find('nocloud-amd64.raw.xz');
if (!installerId || !diskId) {
core.setFailed('Required assets missing in draft release');
return;
}
core.setOutput('crds_id', crdsId);
core.setOutput('operator_id', operatorId);
core.setOutput('disk_id', diskId);
core.setOutput('installer_id', installerId);
core.setOutput('disk_id', diskId);
e2e:
name: "E2E Tests"
runs-on: [oracle-vm-24cpu-96gb-x86-64]
#runs-on: [oracle-vm-32cpu-128gb-x86-64]
prepare_env:
name: "Prepare environment"
runs-on: [self-hosted]
permissions:
contents: read
packages: read
@@ -174,20 +164,6 @@ jobs:
name: talos-image
path: _out/assets
- name: "Download CRDs (regular PR)"
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/download-artifact@v4
with:
name: cozystack-crds
path: _out/assets
- name: "Download operator (regular PR)"
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/download-artifact@v4
with:
name: cozystack-operator
path: _out/assets
- name: Download PR patch
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/download-artifact@v4
@@ -208,19 +184,13 @@ jobs:
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-o _out/assets/nocloud-amd64.raw.xz \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-o _out/assets/cozystack-crds.yaml \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.crds_id }}"
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-o _out/assets/cozystack-operator.yaml \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.operator_id }}"
env:
GH_PAT: ${{ secrets.GH_PAT }}
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
# ▸ Prepare environment
# ▸ Start actual job steps
- name: Prepare workspace
run: |
rm -rf /tmp/$SANDBOX_NAME
@@ -240,7 +210,47 @@ jobs:
done
echo "✅ The task completed successfully after $attempt attempts"
# ▸ Install Cozystack
install_cozystack:
name: "Install Cozystack"
runs-on: [self-hosted]
permissions:
contents: read
packages: read
needs: ["prepare_env", "resolve_assets"]
if: ${{ always() && needs.prepare_env.result == 'success' }}
steps:
- name: Prepare _out/assets directory
run: mkdir -p _out/assets
# ▸ Regular PR path — download artefacts produced by the *build* job
- name: "Download installer (regular PR)"
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/download-artifact@v4
with:
name: cozystack-installer
path: _out/assets
# ▸ Release PR path — fetch artefacts from the corresponding draft release
- name: Download assets from draft release (release PR)
if: contains(github.event.pull_request.labels.*.name, 'release')
run: |
mkdir -p _out/assets
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-o _out/assets/cozystack-installer.yaml \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
env:
GH_PAT: ${{ secrets.GH_PAT }}
# ▸ Start actual job steps
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Sync _out/assets directory
run: |
mkdir -p /tmp/$SANDBOX_NAME/_out/assets
mv _out/assets/* /tmp/$SANDBOX_NAME/_out/assets/
- name: Install Cozystack into sandbox
run: |
cd /tmp/$SANDBOX_NAME
@@ -253,77 +263,107 @@ jobs:
fi
echo "❌ Attempt $attempt failed, retrying..."
done
echo "✅ The task completed successfully after $attempt attempts"
echo "✅ The task completed successfully after $attempt attempts."
- name: Run OpenAPI tests
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-openapi
# ▸ Run E2E tests
- name: Run E2E tests
id: e2e_tests
run: |
cd /tmp/$SANDBOX_NAME
failed_tests=""
for app in $(ls hack/e2e-apps/*.bats | xargs -n1 basename | cut -d. -f1); do
echo "::group::Testing $app"
attempt=0
success=false
until [ $attempt -ge 3 ]; do
if make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-$app; then
success=true
break
fi
attempt=$((attempt + 1))
echo "❌ Attempt $attempt failed, retrying..."
done
if [ "$success" = true ]; then
echo "✅ Test $app completed successfully"
else
echo "❌ Test $app failed after $attempt attempts"
failed_tests="$failed_tests $app"
fi
echo "::endgroup::"
done
if [ -n "$failed_tests" ]; then
echo "❌ Failed tests:$failed_tests"
exit 1
fi
echo "✅ All E2E tests passed"
detect_test_matrix:
name: "Detect e2e test matrix"
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set.outputs.matrix }}
# ▸ Collect debug information (always runs)
- name: Collect report
if: always()
steps:
- uses: actions/checkout@v4
- id: set
run: |
apps=$(ls hack/e2e-apps/*.bats | cut -f3 -d/ | cut -f1 -d. | jq -R | jq -cs)
echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
test_apps:
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
name: Test ${{ matrix.app }}
runs-on: [self-hosted]
needs: [install_cozystack,detect_test_matrix]
if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}
steps:
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: E2E Apps
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report || true
attempt=0
until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}; do
attempt=$((attempt + 1))
if [ $attempt -ge 3 ]; then
echo "❌ Attempt $attempt failed, exiting..."
exit 1
fi
echo "❌ Attempt $attempt failed, retrying..."
done
echo "✅ The task completed successfully after $attempt attempts"
collect_debug_information:
name: Collect debug information
runs-on: [self-hosted]
needs: [test_apps]
if: ${{ always() }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Collect report
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report
- name: Upload cozyreport.tgz
if: always()
uses: actions/upload-artifact@v4
with:
name: cozyreport
path: /tmp/${{ env.SANDBOX_NAME }}/_out/cozyreport.tgz
- name: Collect images list
if: always()
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images || true
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images
- name: Upload image list
if: always()
uses: actions/upload-artifact@v4
with:
name: image-list
path: /tmp/${{ env.SANDBOX_NAME }}/_out/images.txt
# ▸ Tear down environment (always runs)
cleanup:
name: Tear down environment
runs-on: [self-hosted]
needs: [collect_debug_information]
if: ${{ always() && needs.test_apps.result == 'success' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Tear down sandbox
if: always()
run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete || true
run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete
- name: Remove workspace
if: always()
run: rm -rf /tmp/$SANDBOX_NAME

View File

@@ -1,7 +1,5 @@
.PHONY: manifests assets unit-tests helm-unit-tests
include hack/common-envs.mk
build-deps:
@command -V find docker skopeo jq gh helm > /dev/null
@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
@@ -14,11 +12,10 @@ build: build-deps
make -C packages/apps/mysql image
make -C packages/apps/clickhouse image
make -C packages/apps/kubernetes image
make -C packages/system/monitoring image
make -C packages/extra/monitoring image
make -C packages/system/cozystack-api image
make -C packages/system/cozystack-controller image
make -C packages/system/backup-controller image
make -C packages/system/backupstrategy-controller image
make -C packages/system/lineage-controller-webhook image
make -C packages/system/cilium image
make -C packages/system/linstor image
@@ -27,55 +24,21 @@ build: build-deps
make -C packages/system/dashboard image
make -C packages/system/metallb image
make -C packages/system/kamaji image
make -C packages/system/kilo image
make -C packages/system/bucket image
make -C packages/system/objectstorage-controller image
make -C packages/system/grafana-operator image
make -C packages/core/testing image
make -C packages/core/talos image
make -C packages/core/platform image
make -C packages/core/installer image
make manifests
manifests:
mkdir -p _out/assets
helm template installer packages/core/installer -n cozy-system \
-s templates/crds.yaml \
> _out/assets/cozystack-crds.yaml
# Talos variant (default)
helm template installer packages/core/installer -n cozy-system \
-s templates/cozystack-operator.yaml \
-s templates/packagesource.yaml \
> _out/assets/cozystack-operator.yaml
# Generic Kubernetes variant (k3s, kubeadm, RKE2)
helm template installer packages/core/installer -n cozy-system \
-s templates/cozystack-operator-generic.yaml \
-s templates/packagesource.yaml \
> _out/assets/cozystack-operator-generic.yaml
# Hosted variant (managed Kubernetes)
helm template installer packages/core/installer -n cozy-system \
-s templates/cozystack-operator-hosted.yaml \
-s templates/packagesource.yaml \
> _out/assets/cozystack-operator-hosted.yaml
(cd packages/core/installer/; helm template --namespace cozy-installer installer .) > _out/assets/cozystack-installer.yaml
cozypkg:
go build -ldflags "-X github.com/cozystack/cozystack/cmd/cozypkg/cmd.Version=v$(COZYSTACK_VERSION)" -o _out/bin/cozypkg ./cmd/cozypkg
assets: assets-talos assets-cozypkg
assets-talos:
assets:
make -C packages/core/talos assets
assets-cozypkg: assets-cozypkg-linux-amd64 assets-cozypkg-linux-arm64 assets-cozypkg-darwin-amd64 assets-cozypkg-darwin-arm64 assets-cozypkg-windows-amd64 assets-cozypkg-windows-arm64
(cd _out/assets/ && sha256sum cozypkg-*.tar.gz) > _out/assets/cozypkg-checksums.txt
assets-cozypkg-%:
$(eval EXT := $(if $(filter windows,$(firstword $(subst -, ,$*))),.exe,))
mkdir -p _out/assets
GOOS=$(firstword $(subst -, ,$*)) GOARCH=$(lastword $(subst -, ,$*)) go build -ldflags "-X github.com/cozystack/cozystack/cmd/cozypkg/cmd.Version=v$(COZYSTACK_VERSION)" -o _out/bin/cozypkg-$*/cozypkg$(EXT) ./cmd/cozypkg
cp LICENSE _out/bin/cozypkg-$*/LICENSE
tar -C _out/bin/cozypkg-$* -czf _out/assets/cozypkg-$*.tar.gz LICENSE cozypkg$(EXT)
test:
make -C packages/core/testing apply
make -C packages/core/testing test

View File

@@ -100,13 +100,13 @@ Describe **when**, **how**, and **where** to back up a specific managed applicat
```go
type PlanSpec struct {
// Application to back up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and other parameters (e.g. storage reference).
// The BackupClass will be resolved to determine the appropriate strategy and parameters
// based on the ApplicationRef.
BackupClassName string `json:"backupClassName"`
// Where backups should be stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// Driver-specific BackupStrategy to use.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// When backups should run.
Schedule PlanSchedule `json:"schedule"`
@@ -145,12 +145,12 @@ Core Plan controller:
* Create a `BackupJob` in the same namespace:
* `spec.planRef.name = plan.Name`
* `spec.applicationRef = plan.spec.applicationRef` (normalized with default apiGroup if not specified)
* `spec.backupClassName = plan.spec.backupClassName`
* `spec.applicationRef = plan.spec.applicationRef`
* `spec.storageRef = plan.spec.storageRef`
* `spec.strategyRef = plan.spec.strategyRef`
* `spec.triggeredBy = "Plan"`
* Set `ownerReferences` so the `BackupJob` is owned by the `Plan`.
**Note:** The `BackupJob` controller resolves the `BackupClass` to determine the appropriate strategy and parameters, based on the `ApplicationRef`. The strategy template is processed with a context containing the `Application` object and `Parameters` from the `BackupClass`.
The Plan controller does **not**:
* Execute backups itself.
@@ -159,64 +159,17 @@ The Plan controller does **not**:
---
### 4.2 BackupClass
### 4.2 Storage
**Group/Kind**
`backups.cozystack.io/v1alpha1, Kind=BackupClass`
**API Shape**
**Purpose**
Define a class of backup configurations that encapsulate strategy and parameters per application type. `BackupClass` is a cluster-scoped resource that allows admins to configure backup strategies and parameters in a reusable way.
TBD
**Key fields (spec)**
**Storage usage**
```go
type BackupClassSpec struct {
// Strategies is a list of backup strategies, each matching a specific application type.
Strategies []BackupClassStrategy `json:"strategies"`
}
type BackupClassStrategy struct {
// StrategyRef references the driver-specific BackupStrategy (e.g., Velero).
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Application specifies which application types this strategy applies to.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
Application ApplicationSelector `json:"application"`
// Parameters holds strategy-specific parameters, like storage reference.
// Common parameters include:
// - backupStorageLocationName: Name of Velero BackupStorageLocation
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
}
type ApplicationSelector struct {
// APIGroup is the API group of the application.
// If not specified, defaults to "apps.cozystack.io".
// +optional
APIGroup *string `json:"apiGroup,omitempty"`
// Kind is the kind of the application (e.g., VirtualMachine, MySQL).
Kind string `json:"kind"`
}
```
**BackupClass resolution**
* When a `BackupJob` or `Plan` references a `BackupClass` via `backupClassName`, the controller:
1. Fetches the `BackupClass` by name.
2. Matches the `ApplicationRef` against strategies in the `BackupClass`:
* Normalizes `ApplicationRef.apiGroup` (defaults to `"apps.cozystack.io"` if not specified).
* Finds a strategy where `ApplicationSelector` matches the `ApplicationRef` (apiGroup and kind).
3. Returns the matched `StrategyRef` and `Parameters`.
* Strategy templates (e.g., Velero's `backupTemplate.spec`) are processed with a context containing:
* `Application`: The application object being backed up.
* `Parameters`: The parameters from the matched `BackupClassStrategy`.
**Parameters**
* Parameters are passed via `Parameters` in the `BackupClass` (e.g., `backupStorageLocationName` for Velero).
* The driver uses these parameters to resolve the actual resources (e.g., Velero's `BackupStorageLocation` CRD).
* `Plan` and `BackupJob` reference `Storage` via `TypedLocalObjectReference`.
* Drivers read `Storage` to know how/where to store or read artifacts.
* Core treats `Storage` spec as opaque; it does not directly talk to S3 or buckets.
---
@@ -236,13 +189,16 @@ type BackupJobSpec struct {
PlanRef *corev1.LocalObjectReference `json:"planRef,omitempty"`
// Application to back up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and related parameters
// The BackupClass will be resolved to determine the appropriate strategy and parameters
// based on the ApplicationRef.
BackupClassName string `json:"backupClassName"`
// Storage to use.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// Driver-specific BackupStrategy to use.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Informational: what triggered this run ("Plan", "Manual", etc.).
TriggeredBy string `json:"triggeredBy,omitempty"`
}
```
@@ -267,9 +223,7 @@ type BackupJobStatus struct {
* Each driver controller:
* Watches `BackupJob`.
* Resolves the `BackupClass` referenced by `spec.backupClassName`.
* Matches the `ApplicationRef` against strategies in the `BackupClass` to find the appropriate strategy.
* Reconciles runs where the resolved strategy's `apiGroup/kind` matches its **strategy type(s)**.
* Reconciles runs where `spec.strategyRef.apiGroup/kind` matches its **strategy type(s)**.
* Driver responsibilities:
1. On first reconcile:
@@ -278,12 +232,7 @@ type BackupJobStatus struct {
* Set `status.phase = Running`.
2. Resolve inputs:
* Resolve `BackupClass` from `spec.backupClassName`.
* Match `ApplicationRef` against `BackupClass` strategies to get `StrategyRef` and `Parameters`.
* Read `Strategy` (driver-owned CRD) from `StrategyRef`.
* Read `Application` from `ApplicationRef`.
* Extract parameters from `Parameters` (e.g., `backupStorageLocationName` for Velero).
* Process strategy template with context: `Application` object and `Parameters` from `BackupClass`.
* Read `Strategy` (driver-owned CRD), `Storage`, `Application`, optionally `Plan`.
3. Execute backup logic (implementation-specific).
4. On success:
@@ -315,14 +264,13 @@ Represent a single **backup artifact** for a given application, decoupled from a
type BackupSpec struct {
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
PlanRef *corev1.LocalObjectReference `json:"planRef,omitempty"`
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
TakenAt metav1.Time `json:"takenAt"`
DriverMetadata map[string]string `json:"driverMetadata,omitempty"`
}
```
**Note:** Parameters are not stored directly in `Backup`. Instead, they are resolved from `BackupClass` parameters when the backup was created. The storage location is managed by the driver (e.g., Velero's `BackupStorageLocation`) and referenced via parameters in the `BackupClass`.
**Key fields (status)**
```go
@@ -342,8 +290,7 @@ type BackupStatus struct {
* Creates a `Backup` in the same namespace (typically owned by the `BackupJob`).
* Populates `spec` fields with:
* The application reference.
* The strategy reference (resolved from `BackupClass` during `BackupJob` execution).
* The application, storage, strategy references.
* `takenAt`.
* Optional `driverMetadata`.
* Sets `status` with:
@@ -359,8 +306,6 @@ type BackupStatus struct {
* Anchor `RestoreJob` operations.
* Implement higher-level policies (retention) if needed.
**Note:** Parameters are resolved from `BackupClass` when the `BackupJob` is created. The driver uses these parameters to determine where to store backups. The storage location itself is managed by the driver (e.g., Velero's `BackupStorageLocation` CRD) and is not directly referenced in the `Backup` resource. When restoring, the driver resolves the storage location from the original `BackupClass` parameters or from the driver's own metadata.
---
### 4.5 RestoreJob
@@ -408,13 +353,13 @@ type RestoreJobStatus struct {
* Determines effective:
* **Strategy**: `backup.spec.strategyRef`.
* **Storage**: Resolved from driver metadata or `BackupClass` parameters (e.g., `backupStorageLocationName` stored in `driverMetadata` or resolved from the original `BackupClass`).
* **Storage**: `backup.spec.storageRef`.
* **Target application**: `spec.targetApplicationRef` or `backup.spec.applicationRef`.
* If effective strategy's GVK is one of its supported strategy types → driver is responsible.
3. Behaviour:
* On first reconcile, set `status.startedAt` and `phase = Running`.
* Resolve `Backup`, storage location (from driver metadata or `BackupClass`), `Strategy`, target application.
* Resolve `Backup`, `Storage`, `Strategy`, target application.
* Execute restore logic (implementation-specific).
* On success:
@@ -469,10 +414,8 @@ The Cozystack backups core API:
* Uses a single group, `backups.cozystack.io`, for all core CRDs.
* Cleanly separates:
* **When** (Plan schedule) core-owned.
* **How & where** (BackupClass) central configuration unit that encapsulates strategy and parameters (e.g., storage reference) per application type, resolved per BackupJob/Plan.
* **Execution** (BackupJob) created by Plan when schedule fires, resolves BackupClass to get strategy and parameters, then delegates to driver.
* **When & where** (Plan + Storage) core-owned.
* **What backup artifacts exist** (Backup) driver-created but cluster-visible.
* **Restore lifecycle** (RestoreJob) shared contract boundary.
* **Execution lifecycle** (BackupJob, RestoreJob) shared contract boundary.
* Allows multiple strategy drivers to implement backup/restore logic without entangling their implementation with the core API.

View File

@@ -57,6 +57,10 @@ type BackupSpec struct {
// +optional
PlanRef *corev1.LocalObjectReference `json:"planRef,omitempty"`
// StorageRef refers to the Storage object that describes where the backup
// artifact is stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// StrategyRef refers to the driver-specific BackupStrategy that was used
// to create this backup. This allows the driver to later perform restores.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`

View File

@@ -1,85 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Package v1alpha1 defines backups.cozystack.io API types.
//
// Group: backups.cozystack.io
// Version: v1alpha1
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func init() {
SchemeBuilder.Register(func(s *runtime.Scheme) error {
s.AddKnownTypes(GroupVersion,
&BackupClass{},
&BackupClassList{},
)
return nil
})
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster
// +kubebuilder:subresource:status
// BackupClass defines a class of backup configurations that can be referenced
// by BackupJob and Plan resources. It encapsulates strategy and storage configuration
// per application type.
type BackupClass struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BackupClassSpec `json:"spec,omitempty"`
Status BackupClassStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// BackupClassList contains a list of BackupClasses.
type BackupClassList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []BackupClass `json:"items"`
}
// BackupClassSpec defines the desired state of a BackupClass.
type BackupClassSpec struct {
// Strategies is a list of backup strategies, each matching a specific application type.
Strategies []BackupClassStrategy `json:"strategies"`
}
// BackupClassStrategy defines a backup strategy for a specific application type.
type BackupClassStrategy struct {
// StrategyRef references the driver-specific BackupStrategy (e.g., Velero).
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Application specifies which application types this strategy applies to.
Application ApplicationSelector `json:"application"`
// Parameters holds strategy-specific and storage-specific parameters.
// Common parameters include:
// - backupStorageLocationName: Name of Velero BackupStorageLocation
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
}
// ApplicationSelector specifies which application types a strategy applies to.
type ApplicationSelector struct {
// APIGroup is the API group of the application.
// If not specified, defaults to "apps.cozystack.io".
// +optional
APIGroup *string `json:"apiGroup,omitempty"`
// Kind is the kind of the application (e.g., VirtualMachine, MySQL).
Kind string `json:"kind"`
}
// BackupClassStatus defines the observed state of a BackupClass.
type BackupClassStatus struct {
// Conditions represents the latest available observations of a BackupClass's state.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
}

View File

@@ -24,10 +24,6 @@ func init() {
const (
OwningJobNameLabel = thisGroup + "/owned-by.BackupJobName"
OwningJobNamespaceLabel = thisGroup + "/owned-by.BackupJobNamespace"
// DefaultApplicationAPIGroup is the default API group for applications
// when not specified in ApplicationRef or ApplicationSelector.
DefaultApplicationAPIGroup = "apps.cozystack.io"
)
// BackupJobPhase represents the lifecycle phase of a BackupJob.
@@ -50,15 +46,15 @@ type BackupJobSpec struct {
// ApplicationRef holds a reference to the managed application whose state
// is being backed up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and storage configuration.
// The BackupClass will be resolved to determine the appropriate strategy and storage
// based on the ApplicationRef.
// This field is immutable once the BackupJob is created.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="backupClassName is immutable"
BackupClassName string `json:"backupClassName"`
// StorageRef holds a reference to the Storage object that describes where
// the backup will be stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// StrategyRef holds a reference to the driver-specific BackupStrategy object
// that describes how the backup should be created.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
}
// BackupJobStatus represents the observed state of a BackupJob.
@@ -118,13 +114,3 @@ type BackupJobList struct {
metav1.ListMeta `json:"metadata,omitempty"`
Items []BackupJob `json:"items"`
}
// NormalizeApplicationRef sets the default apiGroup to DefaultApplicationAPIGroup if it's not specified.
// This function is exported so it can be used by other packages (e.g., controllers, factories).
func NormalizeApplicationRef(ref corev1.TypedLocalObjectReference) corev1.TypedLocalObjectReference {
if ref.APIGroup == nil || *ref.APIGroup == "" {
apiGroup := DefaultApplicationAPIGroup
ref.APIGroup = &apiGroup
}
return ref
}

View File

@@ -1,67 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"context"
"fmt"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// SetupWebhookWithManager registers the BackupJob webhook with the manager.
func SetupBackupJobWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
For(&BackupJob{}).
Complete()
}
// +kubebuilder:webhook:path=/mutate-backups-cozystack-io-v1alpha1-backupjob,mutating=true,failurePolicy=fail,sideEffects=None,groups=backups.cozystack.io,resources=backupjobs,verbs=create;update,versions=v1alpha1,name=mbackupjob.kb.io,admissionReviewVersions=v1
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (j *BackupJob) Default() {
j.Spec.ApplicationRef = NormalizeApplicationRef(j.Spec.ApplicationRef)
}
// +kubebuilder:webhook:path=/validate-backups-cozystack-io-v1alpha1-backupjob,mutating=false,failurePolicy=fail,sideEffects=None,groups=backups.cozystack.io,resources=backupjobs,verbs=create;update,versions=v1alpha1,name=vbackupjob.kb.io,admissionReviewVersions=v1
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
// It rejects BackupJobs whose backupClassName is empty or whitespace-only.
func (j *BackupJob) ValidateCreate() (admission.Warnings, error) {
	log.FromContext(context.Background()).Info("validating BackupJob creation", "name", j.Name, "namespace", j.Namespace)
	// A BackupJob is unusable without a class; a blank name cannot be resolved.
	if strings.TrimSpace(j.Spec.BackupClassName) != "" {
		return nil, nil
	}
	return nil, fmt.Errorf("backupClassName is required and cannot be empty")
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
// It enforces that spec.backupClassName never changes after creation.
func (j *BackupJob) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
	log.FromContext(context.Background()).Info("validating BackupJob update", "name", j.Name, "namespace", j.Namespace)
	prev, ok := old.(*BackupJob)
	if !ok {
		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a BackupJob but got a %T", old))
	}
	// backupClassName is frozen after creation: switching it would change how
	// strategy and storage are resolved for an already-defined job.
	if prev.Spec.BackupClassName != j.Spec.BackupClassName {
		return nil, fmt.Errorf("backupClassName is immutable and cannot be changed from %q to %q", prev.Spec.BackupClassName, j.Spec.BackupClassName)
	}
	return nil, nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
// Deletion is always allowed; there is nothing to check once the job is going away.
func (j *BackupJob) ValidateDelete() (admission.Warnings, error) {
	// No validation needed for deletion
	return nil, nil
}

View File

@@ -1,334 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
	"strings"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)
// TestBackupJob_ValidateCreate exercises create-time webhook validation:
// a non-blank backupClassName is required; empty and whitespace-only values
// must be rejected with the exact documented error message.
func TestBackupJob_ValidateCreate(t *testing.T) {
	tests := []struct {
		name    string
		job     *BackupJob
		wantErr bool
		errMsg  string
	}{
		{
			name: "valid BackupJob with backupClassName",
			job: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "velero",
				},
			},
			wantErr: false,
		},
		{
			name: "BackupJob with empty backupClassName should be rejected",
			job: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "",
				},
			},
			wantErr: true,
			errMsg:  "backupClassName is required and cannot be empty",
		},
		{
			name: "BackupJob with whitespace-only backupClassName should be rejected",
			job: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "   ",
				},
			},
			wantErr: true,
			errMsg:  "backupClassName is required and cannot be empty",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			warnings, err := tt.job.ValidateCreate()
			if (err != nil) != tt.wantErr {
				t.Errorf("ValidateCreate() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if tt.wantErr && err != nil {
				if tt.errMsg != "" && err.Error() != tt.errMsg {
					t.Errorf("ValidateCreate() error message = %v, want %v", err.Error(), tt.errMsg)
				}
			}
			// len() of a nil slice is 0, so the extra nil check was redundant
			// (staticcheck S1009).
			if len(warnings) > 0 {
				t.Logf("ValidateCreate() warnings = %v", warnings)
			}
		})
	}
}
// TestBackupJob_ValidateUpdate exercises update-time webhook validation:
// backupClassName is immutable (including the empty -> non-empty transition),
// other spec/metadata fields may change freely, and a non-BackupJob old object
// is rejected with a bad-request error.
func TestBackupJob_ValidateUpdate(t *testing.T) {
	baseJob := &BackupJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-job",
			Namespace: "default",
		},
		Spec: BackupJobSpec{
			ApplicationRef: corev1.TypedLocalObjectReference{
				Kind: "VirtualMachine",
				Name: "vm1",
			},
			BackupClassName: "velero",
		},
	}
	tests := []struct {
		name    string
		old     runtime.Object
		new     *BackupJob
		wantErr bool
		errMsg  string
	}{
		{
			name: "update with same backupClassName should succeed",
			old:  baseJob,
			new: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "velero", // Same as old
				},
			},
			wantErr: false,
		},
		{
			name: "update changing backupClassName should be rejected",
			old:  baseJob,
			new: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "different-class", // Changed!
				},
			},
			wantErr: true,
			errMsg:  "backupClassName is immutable and cannot be changed from \"velero\" to \"different-class\"",
		},
		{
			name: "update changing other fields but keeping backupClassName should succeed",
			old:  baseJob,
			new: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
					Labels: map[string]string{
						"new-label": "value",
					},
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm2", // Changed application
					},
					BackupClassName: "velero", // Same as old
				},
			},
			wantErr: false,
		},
		{
			name: "update when old backupClassName is empty should be rejected",
			old: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "", // Empty in old
				},
			},
			new: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "velero", // Setting it for the first time
				},
			},
			wantErr: true,
			errMsg:  "backupClassName is immutable",
		},
		{
			name: "update changing from non-empty to different non-empty should be rejected",
			old: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "class-a",
				},
			},
			new: &BackupJob{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
				Spec: BackupJobSpec{
					ApplicationRef: corev1.TypedLocalObjectReference{
						Kind: "VirtualMachine",
						Name: "vm1",
					},
					BackupClassName: "class-b", // Changed from class-a
				},
			},
			wantErr: true,
			errMsg:  "backupClassName is immutable and cannot be changed from \"class-a\" to \"class-b\"",
		},
		{
			name: "update with invalid old object type should be rejected",
			old: &corev1.Pod{ // Wrong type - will be cast to runtime.Object in test
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-job",
					Namespace: "default",
				},
			},
			new:     baseJob,
			wantErr: true,
			errMsg:  "expected a BackupJob but got a",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			warnings, err := tt.new.ValidateUpdate(tt.old)
			if (err != nil) != tt.wantErr {
				t.Errorf("ValidateUpdate() error = %v, wantErr %v", err, tt.wantErr)
				if err != nil {
					t.Logf("Error message: %v", err.Error())
				}
				return
			}
			// The inner condition previously re-checked tt.errMsg != "" twice;
			// a single check is sufficient.
			if tt.wantErr && err != nil && tt.errMsg != "" {
				if !contains(err.Error(), tt.errMsg) {
					t.Errorf("ValidateUpdate() error message = %v, want contains %v", err.Error(), tt.errMsg)
				}
			}
			// len() of a nil slice is 0, so the extra nil check was redundant
			// (staticcheck S1009).
			if len(warnings) > 0 {
				t.Logf("ValidateUpdate() warnings = %v", warnings)
			}
		})
	}
}
// TestBackupJob_ValidateDelete confirms that deletion is always permitted:
// ValidateDelete must never return an error.
func TestBackupJob_ValidateDelete(t *testing.T) {
	job := &BackupJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-job",
			Namespace: "default",
		},
		Spec: BackupJobSpec{
			ApplicationRef: corev1.TypedLocalObjectReference{
				Kind: "VirtualMachine",
				Name: "vm1",
			},
			BackupClassName: "velero",
		},
	}
	warnings, err := job.ValidateDelete()
	if err != nil {
		t.Errorf("ValidateDelete() should never return an error, got %v", err)
	}
	// len() of a nil slice is 0, so the extra nil check was redundant
	// (staticcheck S1009).
	if len(warnings) > 0 {
		t.Logf("ValidateDelete() warnings = %v", warnings)
	}
}
// TestBackupJob_Default checks that the mutating webhook's Default() runs
// without panicking and leaves backupClassName untouched. Note that Default()
// DOES mutate spec.applicationRef.apiGroup when it is unset; this test only
// asserts that backupClassName is not affected.
func TestBackupJob_Default(t *testing.T) {
	job := &BackupJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-job",
			Namespace: "default",
		},
		Spec: BackupJobSpec{
			ApplicationRef: corev1.TypedLocalObjectReference{
				Kind: "VirtualMachine",
				Name: "vm1",
			},
			BackupClassName: "velero",
		},
	}
	// Default() should not panic and should not modify backupClassName
	// (it only defaults applicationRef.apiGroup).
	originalClassName := job.Spec.BackupClassName
	job.Default()
	if job.Spec.BackupClassName != originalClassName {
		t.Errorf("Default() should not modify backupClassName, got %v, want %v", job.Spec.BackupClassName, originalClassName)
	}
}
// contains reports whether substr is within s.
// Delegates to strings.Contains instead of the previous hand-rolled scan;
// behavior is identical, including the edge cases: an empty substr always
// matches, and a substr longer than s never does.
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}

View File

@@ -65,13 +65,15 @@ type PlanList struct {
type PlanSpec struct {
// ApplicationRef holds a reference to the managed application,
// whose state and configuration must be backed up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and storage configuration.
// The BackupClass will be resolved to determine the appropriate strategy and storage
// based on the ApplicationRef.
BackupClassName string `json:"backupClassName"`
// StorageRef holds a reference to the Storage object that
// describes the location where the backup will be stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// StrategyRef holds a reference to the Strategy object that
// describes, how a backup copy is to be created.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Schedule specifies when backup copies are created.
Schedule PlanSchedule `json:"schedule"`

View File

@@ -26,26 +26,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationSelector) DeepCopyInto(out *ApplicationSelector) {
*out = *in
if in.APIGroup != nil {
in, out := &in.APIGroup, &out.APIGroup
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSelector.
func (in *ApplicationSelector) DeepCopy() *ApplicationSelector {
if in == nil {
return nil
}
out := new(ApplicationSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backup) DeepCopyInto(out *Backup) {
*out = *in
@@ -88,133 +68,6 @@ func (in *BackupArtifact) DeepCopy() *BackupArtifact {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClass) DeepCopyInto(out *BackupClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClass.
func (in *BackupClass) DeepCopy() *BackupClass {
if in == nil {
return nil
}
out := new(BackupClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassList) DeepCopyInto(out *BackupClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]BackupClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassList.
func (in *BackupClassList) DeepCopy() *BackupClassList {
if in == nil {
return nil
}
out := new(BackupClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassSpec) DeepCopyInto(out *BackupClassSpec) {
*out = *in
if in.Strategies != nil {
in, out := &in.Strategies, &out.Strategies
*out = make([]BackupClassStrategy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassSpec.
func (in *BackupClassSpec) DeepCopy() *BackupClassSpec {
if in == nil {
return nil
}
out := new(BackupClassSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassStatus) DeepCopyInto(out *BackupClassStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassStatus.
func (in *BackupClassStatus) DeepCopy() *BackupClassStatus {
if in == nil {
return nil
}
out := new(BackupClassStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassStrategy) DeepCopyInto(out *BackupClassStrategy) {
*out = *in
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
in.Application.DeepCopyInto(&out.Application)
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassStrategy.
func (in *BackupClassStrategy) DeepCopy() *BackupClassStrategy {
if in == nil {
return nil
}
out := new(BackupClassStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupJob) DeepCopyInto(out *BackupJob) {
*out = *in
@@ -283,6 +136,8 @@ func (in *BackupJobSpec) DeepCopyInto(out *BackupJobSpec) {
**out = **in
}
in.ApplicationRef.DeepCopyInto(&out.ApplicationRef)
in.StorageRef.DeepCopyInto(&out.StorageRef)
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupJobSpec.
@@ -371,6 +226,7 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = new(v1.LocalObjectReference)
**out = **in
}
in.StorageRef.DeepCopyInto(&out.StorageRef)
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
in.TakenAt.DeepCopyInto(&out.TakenAt)
if in.DriverMetadata != nil {
@@ -497,6 +353,8 @@ func (in *PlanSchedule) DeepCopy() *PlanSchedule {
func (in *PlanSpec) DeepCopyInto(out *PlanSpec) {
*out = *in
in.ApplicationRef.DeepCopyInto(&out.ApplicationRef)
in.StorageRef.DeepCopyInto(&out.StorageRef)
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
out.Schedule = in.Schedule
}

View File

@@ -24,45 +24,45 @@ import (
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster
// ApplicationDefinition is the Schema for the applicationdefinitions API
type ApplicationDefinition struct {
// CozystackResourceDefinition is the Schema for the cozystackresourcedefinitions API
type CozystackResourceDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ApplicationDefinitionSpec `json:"spec,omitempty"`
Spec CozystackResourceDefinitionSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// ApplicationDefinitionList contains a list of ApplicationDefinitions
type ApplicationDefinitionList struct {
// CozystackResourceDefinitionList contains a list of CozystackResourceDefinitions
type CozystackResourceDefinitionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ApplicationDefinition `json:"items"`
Items []CozystackResourceDefinition `json:"items"`
}
func init() {
SchemeBuilder.Register(&ApplicationDefinition{}, &ApplicationDefinitionList{})
SchemeBuilder.Register(&CozystackResourceDefinition{}, &CozystackResourceDefinitionList{})
}
type ApplicationDefinitionSpec struct {
type CozystackResourceDefinitionSpec struct {
// Application configuration
Application ApplicationDefinitionApplication `json:"application"`
Application CozystackResourceDefinitionApplication `json:"application"`
// Release configuration
Release ApplicationDefinitionRelease `json:"release"`
Release CozystackResourceDefinitionRelease `json:"release"`
// Secret selectors
Secrets ApplicationDefinitionResources `json:"secrets,omitempty"`
Secrets CozystackResourceDefinitionResources `json:"secrets,omitempty"`
// Service selectors
Services ApplicationDefinitionResources `json:"services,omitempty"`
Services CozystackResourceDefinitionResources `json:"services,omitempty"`
// Ingress selectors
Ingresses ApplicationDefinitionResources `json:"ingresses,omitempty"`
Ingresses CozystackResourceDefinitionResources `json:"ingresses,omitempty"`
// Dashboard configuration for this resource
Dashboard *ApplicationDefinitionDashboard `json:"dashboard,omitempty"`
Dashboard *CozystackResourceDefinitionDashboard `json:"dashboard,omitempty"`
}
type ApplicationDefinitionApplication struct {
type CozystackResourceDefinitionApplication struct {
// Kind of the application, used for UI and API
Kind string `json:"kind"`
// OpenAPI schema for the application, used for API validation
@@ -73,7 +73,7 @@ type ApplicationDefinitionApplication struct {
Singular string `json:"singular"`
}
type ApplicationDefinitionRelease struct {
type CozystackResourceDefinitionRelease struct {
// Reference to the chart source
ChartRef *helmv2.CrossNamespaceSourceReference `json:"chartRef"`
// Labels for the release
@@ -82,7 +82,7 @@ type ApplicationDefinitionRelease struct {
Prefix string `json:"prefix"`
}
// ApplicationDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
// CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
// A resource matches this selector only if it satisfies ALL criteria:
// - Label selector conditions (matchExpressions and matchLabels)
// - AND has a name that matches one of the names in resourceNames (if specified)
@@ -105,7 +105,7 @@ type ApplicationDefinitionRelease struct {
// - "{{ .name }}-secret"
// - "{{ .kind }}-{{ .name }}-tls"
// - "specificname"
type ApplicationDefinitionResourceSelector struct {
type CozystackResourceDefinitionResourceSelector struct {
metav1.LabelSelector `json:",inline"`
// ResourceNames is a list of resource names to match
// If specified, the resource must have one of these exact names to match the selector
@@ -113,16 +113,16 @@ type ApplicationDefinitionResourceSelector struct {
ResourceNames []string `json:"resourceNames,omitempty"`
}
type ApplicationDefinitionResources struct {
type CozystackResourceDefinitionResources struct {
// Exclude contains an array of resource selectors that target resources.
// If a resource matches the selector in any of the elements in the array, it is
// hidden from the user, regardless of the matches in the include array.
Exclude []*ApplicationDefinitionResourceSelector `json:"exclude,omitempty"`
Exclude []*CozystackResourceDefinitionResourceSelector `json:"exclude,omitempty"`
// Include contains an array of resource selectors that target resources.
// If a resource matches the selector in any of the elements in the array, and
// matches none of the selectors in the exclude array that resource is marked
// as a tenant resource and is visible to users.
Include []*ApplicationDefinitionResourceSelector `json:"include,omitempty"`
Include []*CozystackResourceDefinitionResourceSelector `json:"include,omitempty"`
}
// ---- Dashboard types ----
@@ -139,8 +139,8 @@ const (
DashboardTabYAML DashboardTab = "yaml"
)
// ApplicationDefinitionDashboard describes how this resource appears in the UI.
type ApplicationDefinitionDashboard struct {
// CozystackResourceDefinitionDashboard describes how this resource appears in the UI.
type CozystackResourceDefinitionDashboard struct {
// Human-readable name shown in the UI (e.g., "Bucket")
Singular string `json:"singular"`
// Plural human-readable name (e.g., "Buckets")

View File

@@ -28,225 +28,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinition) DeepCopyInto(out *ApplicationDefinition) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinition.
func (in *ApplicationDefinition) DeepCopy() *ApplicationDefinition {
if in == nil {
return nil
}
out := new(ApplicationDefinition)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ApplicationDefinition) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionApplication) DeepCopyInto(out *ApplicationDefinitionApplication) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionApplication.
func (in *ApplicationDefinitionApplication) DeepCopy() *ApplicationDefinitionApplication {
if in == nil {
return nil
}
out := new(ApplicationDefinitionApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionDashboard) DeepCopyInto(out *ApplicationDefinitionDashboard) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Tabs != nil {
in, out := &in.Tabs, &out.Tabs
*out = make([]DashboardTab, len(*in))
copy(*out, *in)
}
if in.KeysOrder != nil {
in, out := &in.KeysOrder, &out.KeysOrder
*out = make([][]string, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionDashboard.
func (in *ApplicationDefinitionDashboard) DeepCopy() *ApplicationDefinitionDashboard {
if in == nil {
return nil
}
out := new(ApplicationDefinitionDashboard)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionList) DeepCopyInto(out *ApplicationDefinitionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ApplicationDefinition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionList.
func (in *ApplicationDefinitionList) DeepCopy() *ApplicationDefinitionList {
if in == nil {
return nil
}
out := new(ApplicationDefinitionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ApplicationDefinitionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionRelease) DeepCopyInto(out *ApplicationDefinitionRelease) {
*out = *in
if in.ChartRef != nil {
in, out := &in.ChartRef, &out.ChartRef
*out = new(v2.CrossNamespaceSourceReference)
**out = **in
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionRelease.
func (in *ApplicationDefinitionRelease) DeepCopy() *ApplicationDefinitionRelease {
if in == nil {
return nil
}
out := new(ApplicationDefinitionRelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionResourceSelector) DeepCopyInto(out *ApplicationDefinitionResourceSelector) {
*out = *in
in.LabelSelector.DeepCopyInto(&out.LabelSelector)
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionResourceSelector.
func (in *ApplicationDefinitionResourceSelector) DeepCopy() *ApplicationDefinitionResourceSelector {
if in == nil {
return nil
}
out := new(ApplicationDefinitionResourceSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionResources) DeepCopyInto(out *ApplicationDefinitionResources) {
*out = *in
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]*ApplicationDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ApplicationDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]*ApplicationDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ApplicationDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionResources.
func (in *ApplicationDefinitionResources) DeepCopy() *ApplicationDefinitionResources {
if in == nil {
return nil
}
out := new(ApplicationDefinitionResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionSpec) DeepCopyInto(out *ApplicationDefinitionSpec) {
*out = *in
out.Application = in.Application
in.Release.DeepCopyInto(&out.Release)
in.Secrets.DeepCopyInto(&out.Secrets)
in.Services.DeepCopyInto(&out.Services)
in.Ingresses.DeepCopyInto(&out.Ingresses)
if in.Dashboard != nil {
in, out := &in.Dashboard, &out.Dashboard
*out = new(ApplicationDefinitionDashboard)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionSpec.
func (in *ApplicationDefinitionSpec) DeepCopy() *ApplicationDefinitionSpec {
if in == nil {
return nil
}
out := new(ApplicationDefinitionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Component) DeepCopyInto(out *Component) {
*out = *in
@@ -297,6 +78,225 @@ func (in *ComponentInstall) DeepCopy() *ComponentInstall {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinition) DeepCopyInto(out *CozystackResourceDefinition) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinition.
func (in *CozystackResourceDefinition) DeepCopy() *CozystackResourceDefinition {
if in == nil {
return nil
}
out := new(CozystackResourceDefinition)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CozystackResourceDefinition) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionApplication) DeepCopyInto(out *CozystackResourceDefinitionApplication) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionApplication.
func (in *CozystackResourceDefinitionApplication) DeepCopy() *CozystackResourceDefinitionApplication {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionDashboard) DeepCopyInto(out *CozystackResourceDefinitionDashboard) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Tabs != nil {
in, out := &in.Tabs, &out.Tabs
*out = make([]DashboardTab, len(*in))
copy(*out, *in)
}
if in.KeysOrder != nil {
in, out := &in.KeysOrder, &out.KeysOrder
*out = make([][]string, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionDashboard.
func (in *CozystackResourceDefinitionDashboard) DeepCopy() *CozystackResourceDefinitionDashboard {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionDashboard)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionList) DeepCopyInto(out *CozystackResourceDefinitionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CozystackResourceDefinition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionList.
func (in *CozystackResourceDefinitionList) DeepCopy() *CozystackResourceDefinitionList {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CozystackResourceDefinitionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionRelease) DeepCopyInto(out *CozystackResourceDefinitionRelease) {
*out = *in
if in.ChartRef != nil {
in, out := &in.ChartRef, &out.ChartRef
*out = new(v2.CrossNamespaceSourceReference)
**out = **in
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionRelease.
func (in *CozystackResourceDefinitionRelease) DeepCopy() *CozystackResourceDefinitionRelease {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionRelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionResourceSelector) DeepCopyInto(out *CozystackResourceDefinitionResourceSelector) {
*out = *in
in.LabelSelector.DeepCopyInto(&out.LabelSelector)
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionResourceSelector.
func (in *CozystackResourceDefinitionResourceSelector) DeepCopy() *CozystackResourceDefinitionResourceSelector {
// A nil receiver yields nil, so callers can chain DeepCopy without nil checks.
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionResourceSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionResources) DeepCopyInto(out *CozystackResourceDefinitionResources) {
*out = *in
// Exclude is a slice of pointers: copy element-wise, preserving nil entries as nil.
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]*CozystackResourceDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
// Shadowed in/out now point at the individual slice elements.
in, out := &(*in)[i], &(*out)[i]
*out = new(CozystackResourceDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
// Include mirrors the Exclude handling above.
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]*CozystackResourceDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(CozystackResourceDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionResources.
func (in *CozystackResourceDefinitionResources) DeepCopy() *CozystackResourceDefinitionResources {
// A nil receiver yields nil, so callers can chain DeepCopy without nil checks.
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionSpec) DeepCopyInto(out *CozystackResourceDefinitionSpec) {
*out = *in
// Plain assignment; the generator emits this form only for fields without reference types.
out.Application = in.Application
// These fields carry pointers/maps/slices internally, so each delegates to its own generated deep copy.
in.Release.DeepCopyInto(&out.Release)
in.Secrets.DeepCopyInto(&out.Secrets)
in.Services.DeepCopyInto(&out.Services)
in.Ingresses.DeepCopyInto(&out.Ingresses)
// Dashboard is optional (pointer): allocate a fresh target so out does not alias in.
if in.Dashboard != nil {
in, out := &in.Dashboard, &out.Dashboard
*out = new(CozystackResourceDefinitionDashboard)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionSpec.
func (in *CozystackResourceDefinitionSpec) DeepCopy() *CozystackResourceDefinitionSpec {
// A nil receiver yields nil, so callers can chain DeepCopy without nil checks.
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DependencyStatus) DeepCopyInto(out *DependencyStatus) {
*out = *in

View File

@@ -29,8 +29,6 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
@@ -136,11 +134,6 @@ func main() {
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "core.backups.cozystack.io",
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&backupsv1alpha1.BackupClass{}: {},
},
},
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
@@ -175,12 +168,6 @@ func main() {
os.Exit(1)
}
// Register BackupJob webhook for validation (immutability of backupClassName)
if err = backupsv1alpha1.SetupBackupJobWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "BackupJob")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {

View File

@@ -29,8 +29,6 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
@@ -132,11 +130,6 @@ func main() {
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "strategy.backups.cozystack.io",
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&backupsv1alpha1.BackupClass{}: {},
},
},
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly

View File

@@ -23,9 +23,6 @@ import (
"github.com/spf13/cobra"
)
// Version is set at build time via -ldflags.
var Version = "dev"
// rootCmd represents the base command when called without any subcommands.
var rootCmd = &cobra.Command{
Use: "cozypkg",
@@ -47,6 +44,6 @@ func Execute() error {
}
func init() {
rootCmd.Version = Version
// Commands are registered in their respective init() functions
}

View File

@@ -68,6 +68,7 @@ func main() {
var disableTelemetry bool
var telemetryEndpoint string
var telemetryInterval string
var cozystackVersion string
var reconcileDeployment bool
var tlsOpts []func(*tls.Config)
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
@@ -86,6 +87,8 @@ func main() {
"Endpoint for sending telemetry data")
flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
"Interval between telemetry data collection (e.g. 15m, 1h)")
flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
"Version of Cozystack")
flag.BoolVar(&reconcileDeployment, "reconcile-deployment", false,
"If set, the Cozystack API server is assumed to run as a Deployment, else as a DaemonSet.")
opts := zap.Options{
@@ -103,9 +106,10 @@ func main() {
// Configure telemetry
telemetryConfig := telemetry.Config{
Disabled: disableTelemetry,
Endpoint: telemetryEndpoint,
Interval: interval,
Disabled: disableTelemetry,
Endpoint: telemetryEndpoint,
Interval: interval,
CozystackVersion: cozystackVersion,
}
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
@@ -200,20 +204,20 @@ func main() {
if reconcileDeployment {
cozyAPIKind = "Deployment"
}
if err = (&controller.ApplicationDefinitionReconciler{
if err = (&controller.CozystackResourceDefinitionReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
CozystackAPIKind: cozyAPIKind,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ApplicationDefinitionReconciler")
setupLog.Error(err, "unable to create controller", "controller", "CozystackResourceDefinitionReconciler")
os.Exit(1)
}
if err = (&controller.ApplicationDefinitionHelmReconciler{
if err = (&controller.CozystackResourceDefinitionHelmReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ApplicationDefinitionHelmReconciler")
setupLog.Error(err, "unable to create controller", "controller", "CozystackResourceDefinitionHelmReconciler")
os.Exit(1)
}

View File

@@ -52,7 +52,6 @@ import (
"github.com/cozystack/cozystack/internal/cozyvaluesreplicator"
"github.com/cozystack/cozystack/internal/fluxinstall"
"github.com/cozystack/cozystack/internal/operator"
"github.com/cozystack/cozystack/internal/telemetry"
// +kubebuilder:scaffold:imports
)
@@ -78,9 +77,7 @@ func main() {
var secureMetrics bool
var enableHTTP2 bool
var installFlux bool
var disableTelemetry bool
var telemetryEndpoint string
var telemetryInterval string
var cozystackVersion string
var cozyValuesSecretName string
var cozyValuesSecretNamespace string
var cozyValuesNamespaceSelector string
@@ -98,12 +95,8 @@ func main() {
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
flag.BoolVar(&installFlux, "install-flux", false, "Install Flux components before starting reconcile loop")
flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
"Disable telemetry collection")
flag.StringVar(&telemetryEndpoint, "telemetry-endpoint", "https://telemetry.cozystack.io",
"Endpoint for sending telemetry data")
flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
"Interval between telemetry data collection (e.g. 15m, 1h)")
flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
"Version of Cozystack")
flag.StringVar(&platformSourceURL, "platform-source-url", "", "Platform source URL (oci:// or https://). If specified, generates OCIRepository or GitRepository resource.")
flag.StringVar(&platformSourceName, "platform-source-name", "cozystack-packages", "Name for the generated platform source resource (default: cozystack-packages)")
flag.StringVar(&platformSourceRef, "platform-source-ref", "", "Reference specification as key=value pairs (e.g., 'branch=main' or 'digest=sha256:...,tag=v1.0'). For OCI: digest, semver, semverFilter, tag. For Git: branch, tag, semver, name, commit.")
@@ -247,34 +240,6 @@ func main() {
os.Exit(1)
}
// Parse telemetry interval
interval, err := time.ParseDuration(telemetryInterval)
if err != nil {
setupLog.Error(err, "invalid telemetry interval")
os.Exit(1)
}
// Configure telemetry
telemetryConfig := telemetry.Config{
Disabled: disableTelemetry,
Endpoint: telemetryEndpoint,
Interval: interval,
}
// Initialize telemetry collector
// Use APIReader (non-cached) because the manager's cache is filtered
// and doesn't include resources needed for telemetry (e.g., kube-system namespace, nodes, etc.)
collector, err := telemetry.NewOperatorCollector(mgr.GetAPIReader(), &telemetryConfig, config)
if err != nil {
setupLog.V(1).Info("unable to create telemetry collector, telemetry will be disabled", "error", err)
}
if collector != nil {
if err := mgr.Add(collector); err != nil {
setupLog.V(1).Info("unable to set up telemetry collector, continuing without telemetry", "error", err)
}
}
setupLog.Info("Starting controller manager")
mgrCtx := ctrl.SetupSignalHandler()
if err := mgr.Start(mgrCtx); err != nil {

View File

@@ -1,151 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"flag"
"os"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/cozystack/cozystack/internal/controller/fluxplunger"
// +kubebuilder:scaffold:imports
)
// Package-level logger and runtime scheme shared by main.
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
// init registers the client-go core types and the Flux HelmRelease (helmv2) API
// into the scheme before the manager is constructed in main.
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(helmv2.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
// main parses flags, builds a controller-runtime manager with secure-by-default
// metrics serving, registers the FluxPlunger controller plus health/ready probes,
// and blocks in mgr.Start until a termination signal is received.
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var secureMetrics bool
var enableHTTP2 bool
var tlsOpts []func(*tls.Config)
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&secureMetrics, "metrics-secure", true,
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics server")
opts := zap.Options{
Development: false,
}
// zap binds its own flags; flag.Parse must run after all registrations above.
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
// if the enable-http2 flag is false (the default), http/2 should be disabled
// due to its vulnerabilities. More specifically, disabling http/2 will
// prevent from being vulnerable to the HTTP/2 Stream Cancellation and
// Rapid Reset CVEs. For more information see:
// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
// - https://github.com/advisories/GHSA-4374-p667-p6c8
disableHTTP2 := func(c *tls.Config) {
setupLog.Info("disabling http/2")
// Restricting ALPN to http/1.1 prevents HTTP/2 negotiation entirely.
c.NextProtos = []string{"http/1.1"}
}
if !enableHTTP2 {
tlsOpts = append(tlsOpts, disableHTTP2)
}
// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
// More info:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
// - https://book.kubebuilder.io/reference/metrics.html
metricsServerOptions := metricsserver.Options{
BindAddress: metricsAddr,
SecureServing: secureMetrics,
TLSOpts: tlsOpts,
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Metrics: metricsServerOptions,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "flux-plunger.cozystack.io",
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
// speeds up voluntary leader transitions as the new leader don't have to wait
// LeaseDuration time first.
//
// In the default scaffold provided, the program ends immediately after
// the manager stops, so would be fine to enable this option. However,
// if you are doing or is intended to do any operation such as perform cleanups
// after the manager stops then its usage might be unsafe.
// LeaderElectionReleaseOnCancel: true,
})
if err != nil {
setupLog.Error(err, "unable to create manager")
os.Exit(1)
}
// Register the single controller of this binary; it reconciles via the manager's cached client.
if err = (&fluxplunger.FluxPlunger{
Client: mgr.GetClient(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "FluxPlunger")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
// Liveness and readiness probes both use the trivial Ping check; they must be
// added before Start, which serves them on probeAddr.
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
// Start blocks until the signal-handler context is cancelled (SIGINT/SIGTERM).
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

View File

@@ -1,602 +0,0 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__elements": {},
"__requires": [
{
"type": "panel",
"id": "bargauge",
"name": "Bar gauge",
"version": ""
},
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "9.4.7"
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 16612,
"graphTooltip": 0,
"id": null,
"links": [
{
"asDropdown": true,
"icon": "external link",
"includeVars": true,
"keepTime": true,
"tags": [
"cilium-overview"
],
"targetBlank": false,
"title": "Cilium Overviews",
"tooltip": "",
"type": "dashboards",
"url": ""
},
{
"asDropdown": true,
"icon": "external link",
"includeVars": false,
"keepTime": true,
"tags": [
"hubble"
],
"targetBlank": false,
"title": "Hubble",
"tooltip": "",
"type": "dashboards",
"url": ""
}
],
"liveNow": false,
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 2,
"panels": [],
"title": "DNS",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 1
},
"id": 37,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "sum(rate(hubble_dns_queries_total{cluster=~\"$cluster\", source_namespace=~\"$source_namespace\", destination_namespace=~\"$destination_namespace\"}[$__rate_interval])) by (source) > 0",
"legendFormat": "{{source}}",
"range": true,
"refId": "A"
}
],
"title": "DNS queries",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 1
},
"id": 41,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showUnfilled": true
},
"pluginVersion": "9.4.7",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "topk(10, sum(rate(hubble_dns_queries_total{cluster=~\"$cluster\", source_namespace=~\"$source_namespace\", destination_namespace=~\"$destination_namespace\"}[$__rate_interval])*60) by (query))",
"legendFormat": "{{query}}",
"range": true,
"refId": "A"
}
],
"title": "Top 10 DNS queries",
"type": "bargauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 10
},
"id": 39,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "round(sum(rate(hubble_dns_queries_total{cluster=~\"$cluster\", source_namespace=~\"$source_namespace\", destination_namespace=~\"$destination_namespace\"}[$__rate_interval])) by (source) - sum(label_replace(sum(rate(hubble_dns_responses_total{cluster=~\"$cluster\", source_namespace=~\"$destination_namespace\", destination_namespace=~\"$source_namespace\"}[$__rate_interval])) by (destination), \"source\", \"$1\", \"destination\", \"(.*)\")) without (destination), 0.001) > 0",
"legendFormat": "{{source}}",
"range": true,
"refId": "A"
}
],
"title": "Missing DNS responses",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 10
},
"id": 43,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "sum(rate(hubble_dns_responses_total{cluster=~\"$cluster\", source_namespace=~\"$destination_namespace\", destination_namespace=~\"$source_namespace\", rcode!=\"No Error\"}[$__rate_interval])) by (destination, rcode) > 0",
"legendFormat": "{{destination}}: {{rcode}}",
"range": true,
"refId": "A"
}
],
"title": "DNS errors",
"type": "timeseries"
}
],
"refresh": "",
"revision": 1,
"schemaVersion": 38,
"style": "dark",
"tags": [
"kubecon-demo"
],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "default",
"value": "default"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "DS_PROMETHEUS",
"options": [],
"query": "prometheus",
"queryValue": "",
"refresh": 1,
"regex": "(?!grafanacloud-usage|grafanacloud-ml-metrics).+",
"skipUrlSync": false,
"type": "datasource"
},
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"definition": "label_values(cilium_version, cluster)",
"hide": 0,
"includeAll": true,
"multi": true,
"name": "cluster",
"options": [],
"query": {
"query": "label_values(cilium_version, cluster)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"definition": "label_values(source_namespace)",
"hide": 0,
"includeAll": true,
"label": "Source Namespace",
"multi": true,
"name": "source_namespace",
"options": [],
"query": {
"query": "label_values(source_namespace)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"definition": "label_values(destination_namespace)",
"hide": 0,
"includeAll": true,
"label": "Destination Namespace",
"multi": true,
"name": "destination_namespace",
"options": [],
"query": {
"query": "label_values(destination_namespace)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Hubble / DNS Overview (Namespace)",
"uid": "_f0DUpY4k",
"version": 26,
"weekStart": ""
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,16 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.37.10
-->
## Features and Improvements
* **[virtual-machine] Improve check for resizing job**: Improved storage resize logic to only expand persistent volume claims when storage is being increased, preventing unintended storage reduction operations. Added validation to accurately compare current and desired storage sizes before triggering resize operations ([**@kvaps**](https://github.com/kvaps) in #1688, #1702).
## Fixes
* **[dashboard] Fix CustomFormsOverride schema to nest properties under spec.properties**: Fixed the logic for generating CustomFormsOverride schema to properly nest properties under `spec.properties` instead of directly under `properties`, ensuring correct form schema generation in the dashboard ([**@kvaps**](https://github.com/kvaps) in #1692, #1699).
---
**Full Changelog**: [v0.37.9...v0.37.10](https://github.com/cozystack/cozystack/compare/v0.37.9...v0.37.10)

View File

@@ -1,18 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.5
-->
## Features and Improvements
* **[virtual-machine,vm-instance] Add nodeAffinity for Windows VMs based on scheduling config**: Added nodeAffinity configuration to virtual-machine and vm-instance charts to support dedicated nodes for Windows VMs. When `dedicatedNodesForWindowsVMs` is enabled in the `cozystack-scheduling` ConfigMap, Windows VMs are scheduled on nodes with label `scheduling.cozystack.io/vm-windows=true`, while non-Windows VMs prefer nodes without this label ([**@kvaps**](https://github.com/kvaps) in #1693, #1744).
* **[cilium] Enable automatic pod rollout on configmap updates**: Cilium and Cilium operator pods now automatically restart when the cilium-config ConfigMap is updated, ensuring configuration changes are applied immediately without manual intervention ([**@kvaps**](https://github.com/kvaps) in #1728, #1745).
* **Update SeaweedFS v4.02**: Updated SeaweedFS to version 4.02 with improved S3 daemon performance and fixes. This update includes better S3 compatibility and performance improvements ([**@kvaps**](https://github.com/kvaps) in #1725, #1732).
## Fixes
* **[apps] Refactor apiserver to use typed objects and fix UnstructuredList GVK**: Refactored the apiserver REST handlers to use typed objects (`appsv1alpha1.Application`) instead of `unstructured.Unstructured`, eliminating the need for runtime conversions and simplifying the codebase. Additionally, fixed an issue where `UnstructuredList` objects were using the first registered kind from `typeToGVK` instead of the kind from the object's field when multiple kinds are registered with the same Go type. This fix includes the upstream fix from kubernetes/kubernetes#135537 ([**@kvaps**](https://github.com/kvaps) in #1679, #1709).
---
**Full Changelog**: [v0.38.4...v0.38.5](https://github.com/cozystack/cozystack/compare/v0.38.4...v0.38.5)

View File

@@ -1,12 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.6
-->
## Development, Testing, and CI/CD
* **[kubernetes] Add lb tests for tenant k8s**: Added load balancer tests for tenant Kubernetes clusters, improving test coverage and ensuring proper load balancer functionality in tenant environments ([**@IvanHunters**](https://github.com/IvanHunters) in #1783, #1792).
---
**Full Changelog**: [v0.38.5...v0.38.6](https://github.com/cozystack/cozystack/compare/v0.38.5...v0.38.6)

View File

@@ -1,13 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.7
-->
## Fixes
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring for virtual machines that are not running for extended periods ([**@lexfrei**](https://github.com/lexfrei) in #1770).
* **[kubevirt-operator] Revert incorrect case change in VM alerts**: Reverted incorrect case change in VM alert names to maintain consistency with alert naming conventions ([**@lexfrei**](https://github.com/lexfrei) in #1804, #1805).
---
**Full Changelog**: [v0.38.6...v0.38.7](https://github.com/cozystack/cozystack/compare/v0.38.6...v0.38.7)

View File

@@ -1,12 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.8
-->
## Improvements
* **[multus] Remove memory limit**: Removed memory limit for Multus daemonset due to unpredictable memory consumption spikes during startup after node reboots (reported up to 3Gi). This temporary change prevents out-of-memory issues while the root cause is addressed in future releases ([**@nbykov0**](https://github.com/nbykov0) in #1834).
---
**Full Changelog**: [v0.38.7...v0.38.8](https://github.com/cozystack/cozystack/compare/v0.38.7...v0.38.8)

View File

@@ -1,19 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.2
-->
## Features and Improvements
* **[vm] Always expose VMs with a service**: Virtual machines are now always exposed with at least a ClusterIP service, ensuring they have in-cluster DNS names and can be accessed from other pods even without public IP addresses ([**@lllamnyp**](https://github.com/lllamnyp) in #1738, #1751).
* **[tenant] Allow egress to parent ingress pods**: Updated tenant network policies to allow egress traffic to parent cluster ingress pods, enabling proper communication patterns between tenant namespaces and parent cluster ingress controllers ([**@lexfrei**](https://github.com/lexfrei) in #1765, #1776).
* **[system] Add resource requests and limits to etcd-defrag**: Added resource requests and limits to etcd-defrag job to ensure proper resource allocation and prevent resource contention during etcd maintenance operations ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1785, #1786).
* **[tenant] Run cleanup job from system namespace**: Moved tenant cleanup job to run from system namespace, improving security and resource isolation for tenant cleanup operations ([**@lllamnyp**](https://github.com/lllamnyp) in #1774, #1777).
## Fixes
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring for virtual machines that are not running for extended periods ([**@lexfrei**](https://github.com/lexfrei) in #1770, #1775).
---
**Full Changelog**: [v0.39.1...v0.39.2](https://github.com/cozystack/cozystack/compare/v0.39.1...v0.39.2)

View File

@@ -1,36 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.3
-->
## Features and Improvements
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities, new admin component with web-based UI, worker component for distributed operations, and enhanced S3 monitoring with Grafana dashboards. Improves S3 service performance by routing requests to nearest available volume servers ([**@nbykov0**](https://github.com/nbykov0) in #1748, #1830).
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 with improved stability and new features ([**@kvaps**](https://github.com/kvaps) in #1819, #1837).
* **[linstor] Build linstor-server with custom patches**: Added custom patches to linstor-server build process, enabling platform-specific optimizations and fixes ([**@kvaps**](https://github.com/kvaps) in #1726, #1818).
* **[api, lineage] Tolerate all taints**: Updated API and lineage webhook to tolerate all taints, ensuring controllers can run on any node regardless of taint configuration ([**@nbykov0**](https://github.com/nbykov0) in #1781, #1827).
* **[ingress] Add topology anti-affinities**: Added topology anti-affinity rules to ingress controller deployment for better pod distribution across nodes ([**@kvaps**](https://github.com/kvaps) in commit 25f31022).
## Fixes
* **[linstor] fix: prevent DRBD device race condition in updateDiscGran**: Fixed race condition in DRBD device management during granularity updates, preventing potential data corruption or device conflicts ([**@kvaps**](https://github.com/kvaps) in #1829, #1836).
* **fix(linstor): prevent orphaned DRBD devices during toggle-disk retry**: Fixed issue where retry logic during disk toggle operations could leave orphaned DRBD devices, now properly cleans up devices during retry attempts ([**@kvaps**](https://github.com/kvaps) in #1823, #1825).
* **[kubernetes] Fix endpoints for cilium-gateway**: Fixed endpoint configuration for cilium-gateway, ensuring proper service discovery and connectivity ([**@kvaps**](https://github.com/kvaps) in #1729, #1808).
* **[kubevirt-operator] Revert incorrect case change in VM alerts**: Reverted incorrect case change in VM alert names to maintain consistency with alert naming conventions ([**@lexfrei**](https://github.com/lexfrei) in #1804, #1806).
## System Configuration
* **[kubeovn] Package from external repo**: Extracted Kube-OVN packaging from main repository to external repository, improving modularity ([**@lllamnyp**](https://github.com/lllamnyp) in #1535).
## Development, Testing, and CI/CD
* **[testing] Add aliases and autocomplete**: Added shell aliases and autocomplete support for testing commands, improving developer experience ([**@lllamnyp**](https://github.com/lllamnyp) in #1803, #1809).
## Dependencies
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities ([**@nbykov0**](https://github.com/nbykov0) in #1748, #1830).
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 ([**@kvaps**](https://github.com/kvaps) in #1819, #1837).
---
**Full Changelog**: [v0.39.2...v0.39.3](https://github.com/cozystack/cozystack/compare/v0.39.2...v0.39.3)

View File

@@ -1,12 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.4
-->
## Features and Improvements
* **[paas-full] Add multus dependencies similar to other CNIs**: Added Multus as a dependency in the paas-full package, consistent with how other CNIs are included. This ensures proper dependency management and simplifies the installation process for environments using Multus networking ([**@nbykov0**](https://github.com/nbykov0) in #1835).
---
**Full Changelog**: [v0.39.3...v0.39.4](https://github.com/cozystack/cozystack/compare/v0.39.3...v0.39.4)

View File

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.5
-->
## Fixes
* **[linstor] Update piraeus-server patches with critical fixes**: Backported critical patches to piraeus-server that address storage stability issues and improve DRBD resource handling. These patches fix edge cases in device management and ensure more reliable storage operations ([**@kvaps**](https://github.com/kvaps) in #1850, #1853).
---
**Full Changelog**: [v0.39.4...v0.39.5](https://github.com/cozystack/cozystack/compare/v0.39.4...v0.39.5)

View File

@@ -1,206 +0,0 @@
# Cozystack v0.40 — "Enhanced Storage & Platform Architecture"
This release introduces LINSTOR scheduler for optimal pod placement, SeaweedFS traffic locality, a new valuesFrom-based configuration mechanism, auto-diskful for LINSTOR, automated version management systems, and numerous improvements across the platform.
## Feature Highlights
### LINSTOR Scheduler for Optimal Pod Placement
Cozystack now includes a custom Kubernetes scheduler extender that works alongside the default kube-scheduler to optimize pod placement on nodes with LINSTOR storage. When a pod requests LINSTOR-backed storage, the scheduler communicates with the LINSTOR controller to find nodes that have local replicas of the requested volumes, prioritizing placement on nodes with existing data to minimize network traffic and improve I/O performance.
The scheduler includes an admission webhook that automatically routes pods using LINSTOR CSI volumes to the custom scheduler, ensuring seamless integration without manual configuration. This feature significantly improves performance for workloads using LINSTOR storage by reducing network latency and improving data locality.
Learn more about LINSTOR in the [documentation](https://cozystack.io/docs/operations/storage/linstor/).
### SeaweedFS Traffic Locality
SeaweedFS has been upgraded to version 4.05 with new traffic locality capabilities that optimize S3 service traffic distribution. The update includes a new admin component with a web-based UI and authentication support, as well as a worker component for distributed operations. These enhancements improve S3 service performance and provide better visibility through enhanced Grafana dashboard panels for buckets, API calls, costs, and performance metrics.
The traffic locality feature ensures that S3 requests are routed to the nearest available volume servers, reducing latency and improving overall performance for distributed storage operations. TLS certificate support for admin and worker components adds an extra layer of security for management operations.
### ValuesFrom Configuration Mechanism
Cozystack now uses FluxCD's valuesFrom mechanism to replace Helm lookup functions for configuration propagation. This architectural improvement provides cleaner config propagation and eliminates the need for force reconcile controllers. Configuration from ConfigMaps (cozystack, cozystack-branding, cozystack-scheduling) and namespace service references (etcd, host, ingress, monitoring, seaweedfs) is now centrally managed through a `cozystack-values` Secret in each namespace.
This change simplifies Helm chart templates by replacing complex lookup functions with direct value references, improves configuration consistency, and reduces the reconciliation overhead. All HelmReleases now automatically receive cluster and namespace configuration through the valuesFrom mechanism, making configuration management more transparent and maintainable.
### Auto-diskful for LINSTOR
The LINSTOR integration now includes automatic diskful functionality that converts diskless nodes to diskful when they hold DRBD resources in Primary state for an extended period (30 minutes). This feature addresses scenarios where workloads are scheduled on nodes without local storage replicas by automatically creating local disk replicas when needed, improving I/O performance for long-running workloads.
When enabled with cleanup options, the system can automatically remove disk replicas that are no longer needed, preventing storage waste from temporary replicas. This intelligent storage management reduces network traffic for frequently accessed data while maintaining efficient storage utilization.
### Automated Version Management Systems
Cozystack now includes automated version management systems for PostgreSQL, Kubernetes, MariaDB, and Redis applications. These systems automatically track upstream versions and provide mechanisms for automated version updates, ensuring that platform users always have access to the latest stable versions while maintaining compatibility with existing deployments.
The version management systems integrate with the Cozystack API and dashboard, providing administrators with visibility into available versions and update paths. This infrastructure sets the foundation for future automated upgrade workflows and version compatibility management.
---
## Major Features and Improvements
### Storage
* **[linstor] Add linstor-scheduler package**: Added LINSTOR scheduler extender for optimal pod placement on nodes with LINSTOR storage. Includes admission webhook that automatically routes pods using LINSTOR CSI volumes to the custom scheduler, ensuring pods are placed on nodes with local replicas to minimize network traffic and improve I/O performance ([**@kvaps**](https://github.com/kvaps) in #1824).
* **[linstor] Enable auto-diskful for diskless nodes**: Enabled DRBD auto-diskful functionality to automatically convert diskless nodes to diskful when they hold volumes in Primary state for more than 30 minutes. Improves I/O performance for long-running workloads by creating local replicas and includes automatic cleanup options to prevent storage waste ([**@kvaps**](https://github.com/kvaps) in #1826).
* **[linstor] Build linstor-server with custom patches**: Added custom patches to linstor-server build process, enabling platform-specific optimizations and fixes ([**@kvaps**](https://github.com/kvaps) in #1726).
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities, new admin component with web-based UI, worker component for distributed operations, and enhanced S3 monitoring with Grafana dashboards. Improves S3 service performance by routing requests to nearest available volume servers ([**@nbykov0**](https://github.com/nbykov0) in #1748).
* **[linstor] fix: prevent DRBD device race condition in updateDiscGran**: Fixed race condition in DRBD device management during granularity updates, preventing potential data corruption or device conflicts ([**@kvaps**](https://github.com/kvaps) in #1829).
* **fix(linstor): prevent orphaned DRBD devices during toggle-disk retry**: Fixed issue where retry logic during disk toggle operations could leave orphaned DRBD devices, now properly cleans up devices during retry attempts ([**@kvaps**](https://github.com/kvaps) in #1823).
### Platform Architecture
* **[platform] Replace Helm lookup with valuesFrom mechanism**: Replaced Helm lookup functions with FluxCD valuesFrom mechanism for configuration propagation. Configuration from ConfigMaps and namespace references is now managed through `cozystack-values` Secret, simplifying templates and eliminating force reconcile controllers ([**@kvaps**](https://github.com/kvaps) in #1787).
* **[platform] refactor: split cozystack-resource-definitions into separate packages**: Refactored cozystack-resource-definitions into separate packages for better organization and maintainability, improving code structure and reducing coupling between components ([**@kvaps**](https://github.com/kvaps) in #1778).
* **[platform] Separate assets server into dedicated deployment**: Separated assets server from main platform deployment, improving scalability and allowing independent scaling of asset delivery infrastructure ([**@kvaps**](https://github.com/kvaps) in #1705).
* **[core] Extract Talos package from installer**: Extracted Talos package configuration from installer into a separate package, improving modularity and enabling independent updates ([**@kvaps**](https://github.com/kvaps) in #1724).
* **[registry] Add application labels and update filtering mechanism**: Added application labels to registry resources and improved filtering mechanism for better resource discovery and organization ([**@kvaps**](https://github.com/kvaps) in #1707).
* **fix(registry): implement field selector filtering for label-based resources**: Implemented field selector filtering for label-based resources in the registry, improving query performance and resource lookup efficiency ([**@kvaps**](https://github.com/kvaps) in #1845).
* **[platform] Add alphabetical sorting to registry resource lists**: Added alphabetical sorting to registry resource lists in the API and dashboard, improving user experience when browsing available applications ([**@lexfrei**](https://github.com/lexfrei) in #1764).
### Version Management
* **[postgres] Add version management system with automated version updates**: Introduced version management system for PostgreSQL with automated version tracking and update mechanisms ([**@kvaps**](https://github.com/kvaps) in #1671).
* **[kubernetes] Add version management system with automated version updates**: Added version management system for Kubernetes tenant clusters with automated version tracking and update capabilities ([**@kvaps**](https://github.com/kvaps) in #1672).
* **[mariadb] Add version management system with automated version updates**: Implemented version management system for MariaDB with automated version tracking and update mechanisms ([**@kvaps**](https://github.com/kvaps) in #1680).
* **[redis] Add version management system with automated version updates**: Added version management system for Redis with automated version tracking and update capabilities ([**@kvaps**](https://github.com/kvaps) in #1681).
### Networking
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 with improved stability and new features ([**@kvaps**](https://github.com/kvaps) in #1819).
* **[kubeovn] Package from external repo**: Extracted Kube-OVN packaging from main repository to external repository, improving modularity ([**@lllamnyp**](https://github.com/lllamnyp) in #1535).
* **[cilium] Update Cilium to v1.18.5**: Updated Cilium to version 1.18.5 with latest features and bug fixes ([**@lexfrei**](https://github.com/lexfrei) in #1769).
* **[system/cilium] Enable topology-aware routing for services**: Enabled topology-aware routing for Cilium services, improving traffic distribution and reducing latency by routing traffic to endpoints in the same zone when possible ([**@nbykov0**](https://github.com/nbykov0) in #1734).
* **[cilium] Enable automatic pod rollout on configmap updates**: Cilium and Cilium operator pods now automatically restart when the cilium-config ConfigMap is updated, ensuring configuration changes are applied immediately ([**@kvaps**](https://github.com/kvaps) in #1728).
* **[kubernetes] Fix endpoints for cilium-gateway**: Fixed endpoint configuration for cilium-gateway, ensuring proper service discovery and connectivity ([**@kvaps**](https://github.com/kvaps) in #1729).
* **[multus] Increase memory limit**: Increased memory limits for Multus components to handle larger network configurations and reduce out-of-memory issues ([**@nbykov0**](https://github.com/nbykov0) in #1773).
* **[main][paas-full] Add multus dependencies similar to other CNIs**: Added Multus as a dependency in the paas-full package, consistent with how other CNIs are included ([**@nbykov0**](https://github.com/nbykov0) in #1842).
### Virtual Machines
* **[vm] Always expose VMs with a service**: Virtual machines are now always exposed with at least a ClusterIP service, ensuring they have in-cluster DNS names and can be accessed from other pods even without public IP addresses ([**@lllamnyp**](https://github.com/lllamnyp) in #1738).
* **[virtual-machine] Improve check for resizing job**: Improved storage resize logic to only expand persistent volume claims when storage is being increased, preventing unintended storage reduction operations ([**@kvaps**](https://github.com/kvaps) in #1688).
* **[virtual-machine,vm-instance] Add nodeAffinity for Windows VMs based on scheduling config**: Added nodeAffinity configuration to virtual-machine and vm-instance charts to support dedicated nodes for Windows VMs ([**@kvaps**](https://github.com/kvaps) in #1693).
### Monitoring
* **[monitoring] Add SLACK_SEVERITY_FILTER field and VMAgent for tenant monitoring**: Introduced SLACK_SEVERITY_FILTER environment variable in Alerta deployment to enable filtering of alert severities for Slack notifications. Added VMAgent resource template for scraping metrics within tenant namespaces, improving monitoring granularity ([**@IvanHunters**](https://github.com/IvanHunters) in #1712).
* **[monitoring] Improve tenant metrics collection**: Improved tenant metrics collection mechanisms for better observability and monitoring coverage ([**@IvanHunters**](https://github.com/IvanHunters) in #1684).
### System Configuration
* **[api, lineage] Tolerate all taints**: Updated API and lineage webhook to tolerate all taints, ensuring controllers can run on any node regardless of taint configuration ([**@nbykov0**](https://github.com/nbykov0) in #1781).
* **[system] Add resource requests and limits to etcd-defrag**: Added resource requests and limits to etcd-defrag job to ensure proper resource allocation and prevent resource contention ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1785).
* **[system:coredns] update coredns app labels to match Talos coredns labels**: Updated coredns app labels to match Talos coredns labels, ensuring consistency across the platform ([**@nbykov0**](https://github.com/nbykov0) in #1675).
* **[system:monitoring-agents] rename coredns metrics service**: Renamed coredns metrics service to avoid interference with coredns service used for name resolution in tenant k8s clusters ([**@nbykov0**](https://github.com/nbykov0) in #1676).
### Tenants and Namespaces
* **[tenant] Allow egress to parent ingress pods**: Updated tenant network policies to allow egress traffic to parent cluster ingress pods, enabling proper communication patterns ([**@lexfrei**](https://github.com/lexfrei) in #1765).
* **[tenant] Run cleanup job from system namespace**: Moved tenant cleanup job to run from system namespace, improving security and resource isolation ([**@lllamnyp**](https://github.com/lllamnyp) in #1774).
### FluxCD
* **[fluxcd] Add flux-aio module and migration**: Added FluxCD all-in-one module with migration support, simplifying FluxCD installation and management ([**@kvaps**](https://github.com/kvaps) in #1698).
* **[fluxcd] Enable source-watcher**: Enabled source-watcher in FluxCD configuration for improved GitOps synchronization and faster update detection ([**@kvaps**](https://github.com/kvaps) in #1706).
### Applications
* **[dashboard] Fix CustomFormsOverride schema to nest properties under spec.properties**: Fixed CustomFormsOverride schema generation to properly nest properties under `spec.properties` instead of directly under `properties`, ensuring correct form schema generation ([**@kvaps**](https://github.com/kvaps) in #1692).
* **[apps] Refactor apiserver to use typed objects and fix UnstructuredList GVK**: Refactored apiserver REST handlers to use typed objects instead of unstructured.Unstructured, eliminating runtime conversions. Fixed UnstructuredList GVK issue where objects were using the first registered kind instead of the correct kind ([**@kvaps**](https://github.com/kvaps) in #1679).
* **[keycloak] Make kubernetes client public**: Made Kubernetes client public in Keycloak configuration, enabling broader access patterns for Kubernetes integrations ([**@lllamnyp**](https://github.com/lllamnyp) in #1802).
## Improvements
* **[kubernetes] Granular application extensions dependencies**: Improved dependency management for Kubernetes application extensions with more granular control over dependencies ([**@nbykov0**](https://github.com/nbykov0) in #1683).
* **[core:installer] Address buildx warnings**: Fixed Dockerfile syntax warnings from buildx, ensuring clean builds without warnings ([**@nbykov0**](https://github.com/nbykov0) in #1682).
* **[linstor] Update piraeus-operator v2.10.2**: Updated piraeus-operator to version 2.10.2 with improved stability and bug fixes ([**@kvaps**](https://github.com/kvaps) in #1689).
* **Update SeaweedFS v4.02**: Updated SeaweedFS to version 4.02 with improved S3 daemon performance and fixes ([**@kvaps**](https://github.com/kvaps) in #1725).
* **[installer,dx] Rename cozypkg to cozyhr**: Renamed cozypkg tool to cozyhr for better branding and consistency ([**@kvaps**](https://github.com/kvaps) in #1763).
## Fixes
* **fix(platform): fix migrations for v0.40 release**: Fixed platform migrations for v0.40 release, ensuring smooth upgrades from previous versions ([**@kvaps**](https://github.com/kvaps) in #1846).
* **[platform] fix migration for removing fluxcd-operator**: Fixed migration logic for removing fluxcd-operator, ensuring clean removal without leaving orphaned resources ([**@kvaps**](https://github.com/kvaps) in commit 4a83d2c7).
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring ([**@lexfrei**](https://github.com/lexfrei) in #1770).
* **[kubevirt-operator] Revert incorrect case change in VM alerts**: Reverted incorrect case change in VM alert names to maintain consistency ([**@lexfrei**](https://github.com/lexfrei) in #1804).
* **[cozystack-controller] Fix: move crds to definitions**: Fixed CRD placement by moving them to definitions directory, ensuring proper resource organization ([**@kvaps**](https://github.com/kvaps) in #1759).
## Dependencies
* **Update SeaweedFS v4.02**: Updated SeaweedFS to version 4.02 ([**@kvaps**](https://github.com/kvaps) in #1725).
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities ([**@nbykov0**](https://github.com/nbykov0) in #1748).
* **[linstor] Update piraeus-operator v2.10.2**: Updated piraeus-operator to version 2.10.2 ([**@kvaps**](https://github.com/kvaps) in #1689).
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 ([**@kvaps**](https://github.com/kvaps) in #1819).
* **[cilium] Update Cilium to v1.18.5**: Updated Cilium to version 1.18.5 ([**@lexfrei**](https://github.com/lexfrei) in #1769).
* **Update go modules**: Updated Go modules to latest versions ([**@kvaps**](https://github.com/kvaps) in #1736).
## Development, Testing, and CI/CD
* **[ci] Fix auto-release workflow**: Fixed auto-release workflow to ensure correct release publishing and tagging ([**@kvaps**](https://github.com/kvaps) in commit 526af294).
* **fix(ci): ensure correct latest release after backport publishing**: Fixed CI workflow to correctly identify and tag the latest release after backport publishing ([**@kvaps**](https://github.com/kvaps) in #1800).
* **[workflows] Add auto patch release workflow**: Added automated patch release workflow for streamlined release management ([**@kvaps**](https://github.com/kvaps) in #1754).
* **[workflow] Add GitHub Action to update release notes from changelogs**: Added GitHub Action to automatically update release notes from changelog files ([**@kvaps**](https://github.com/kvaps) in #1752).
* **[ci] Improve backport workflow with merge_commits skip and conflict resolution**: Improved backport workflow with better merge commit handling and conflict resolution ([**@kvaps**](https://github.com/kvaps) in #1694).
* **[testing] Add aliases and autocomplete**: Added shell aliases and autocomplete support for testing commands, improving developer experience ([**@lllamnyp**](https://github.com/lllamnyp) in #1803).
* **[kubernetes] Add lb tests for tenant k8s**: Added load balancer tests for tenant Kubernetes clusters, improving test coverage ([**@IvanHunters**](https://github.com/IvanHunters) in #1783).
* **[agents] Add instructions for working with unresolved code review comments**: Added documentation and instructions for working with unresolved code review comments in agent workflows ([**@kvaps**](https://github.com/kvaps) in #1710).
* **feat(ci): add /retest command to rerun tests from Prepare environment**: Added `/retest` command to rerun tests from Prepare environment workflow ([**@kvaps**](https://github.com/kvaps) in commit 30c1041e).
* **fix(ci): remove GITHUB_TOKEN extraheader to trigger workflows**: Removed GITHUB_TOKEN extraheader to properly trigger workflows ([**@kvaps**](https://github.com/kvaps) in commit 68a639b3).
* **Fix: Add missing components to `distro-full` bundle**: Fixed missing components in distro-full bundle, ensuring all required components are included ([**@LoneExile**](https://github.com/LoneExile) in #1620).
* **Update Flux Operator (v0.33.0)**: Updated Flux Operator to version 0.33.0 ([**@kingdonb**](https://github.com/kingdonb) in #1649).
* **Add changelogs for v0.38.3 and v0.38.4**: Added missing changelogs for v0.38.3 and v0.38.4 releases ([**@androndo**](https://github.com/androndo) in #1743).
* **Add changelogs for v0.39.1**: Added changelog for v0.39.1 release ([**@androndo**](https://github.com/androndo) in #1750).
* **Add Cloupard to ADOPTERS.md**: Added Cloupard to the adopters list ([**@SerjioTT**](https://github.com/SerjioTT) in #1733).
## Documentation
* **[website] docs: expand monitoring and alerting documentation**: Expanded monitoring and alerting documentation with comprehensive guides, examples, and troubleshooting information ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#388](https://github.com/cozystack/website/pull/388)).
* **[website] fix auto-generation of documentation**: Fixed automatic documentation generation process, ensuring all documentation is properly generated and formatted ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#391](https://github.com/cozystack/website/pull/391)).
* **[website] secure boot**: Added documentation for Secure Boot support in Talos Linux ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#387](https://github.com/cozystack/website/pull/387)).
## Tools
* **[talm] feat(helpers): add bond interface discovery helpers**: Added bond interface discovery helpers to talm for easier network configuration ([**@kvaps**](https://github.com/kvaps) in [cozystack/talm#94](https://github.com/cozystack/talm/pull/94)).
* **[talm] feat(talosconfig): add certificate regeneration from secrets.yaml**: Added certificate regeneration functionality to talm talosconfig command, allowing certificates to be regenerated from secrets.yaml ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@1319dde).
* **[talm] fix(init): make name optional for -u flag**: Made name parameter optional for init command with -u flag, improving flexibility ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@da29320).
* **[talm] fix(wrapper): copy NoOptDefVal when remapping -f to -F flag**: Fixed wrapper to properly copy NoOptDefVal when remapping flags, ensuring correct default value handling ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@f6a6f1d).
* **[talm] fix(root): detect project root with secrets.encrypted.yaml**: Fixed root detection to properly identify project root when secrets.encrypted.yaml is present ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@cf56780).
* **[talm] Fix interfaces helper for Talos v1.12**: Fixed interfaces helper to work correctly with Talos v1.12 ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@34984ae).
* **[talm] Fix typo on README.md**: Fixed typo in README documentation ([**@diegolakatos**](https://github.com/diegolakatos) in [cozystack/talm#92](https://github.com/cozystack/talm/pull/92)).
* **[talm] fix(template): return error for invalid YAML in template output**: Fixed template command to return proper error for invalid YAML output ([**@kvaps**](https://github.com/kvaps) in [cozystack/talm#93](https://github.com/cozystack/talm/pull/93)).
* **[talm] feat(cozystack): enable allocateNodeCIDRs by default**: Enabled allocateNodeCIDRs by default in talm cozystack preset ([**@lexfrei**](https://github.com/lexfrei) in [cozystack/talm#91](https://github.com/cozystack/talm/pull/91)).
* **[boot-to-talos] feat(network): add VLAN interface support via netlink**: Added VLAN interface support via netlink in boot-to-talos for advanced network configuration ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@02874d7).
* **[boot-to-talos] feat(network): add bond interface support via netlink**: Added bond interface support via netlink in boot-to-talos for network bonding configurations ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@067822d).
* **[boot-to-talos] Draft EFI Support**: Added draft EFI support in boot-to-talos for UEFI boot scenarios ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@e194bc8).
* **[boot-to-talos] Change default install image size from 2GB to 3GB**: Changed default install image size from 2GB to 3GB to accommodate larger installations ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@3bfb035).
* **[cozyhr] feat(values): add valuesFrom support for HelmRelease**: Added valuesFrom support for HelmRelease in cozyhr tool, enabling better configuration management ([**@kvaps**](https://github.com/kvaps) in cozystack/cozyhr@7dff0c8).
* **[cozyhr] Rename cozypkg to cozyhr**: Renamed cozypkg tool to cozyhr for better branding ([**@kvaps**](https://github.com/kvaps) in cozystack/cozyhr@1029461).
---
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@IvanHunters**](https://github.com/IvanHunters)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@lllamnyp**](https://github.com/lllamnyp)
* [**@nbykov0**](https://github.com/nbykov0)
* [**@LoneExile**](https://github.com/LoneExile)
* [**@kingdonb**](https://github.com/kingdonb)
* [**@androndo**](https://github.com/androndo)
* [**@SerjioTT**](https://github.com/SerjioTT)
* [**@matthieu-robin**](https://github.com/matthieu-robin)
* [**@diegolakatos**](https://github.com/diegolakatos)
---
**Full Changelog**: [v0.39.0...v0.40.0](https://github.com/cozystack/cozystack/compare/v0.39.0...v0.40.0)
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.0
-->

View File

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.1
-->
## Fixes
* **[linstor] Update piraeus-server patches with critical fixes**: Backported critical patches to piraeus-server that address storage stability issues and improve DRBD resource handling. These patches fix edge cases in device management and ensure more reliable storage operations ([**@kvaps**](https://github.com/kvaps) in #1850, #1852).
---
**Full Changelog**: [v0.40.0...v0.40.1](https://github.com/cozystack/cozystack/compare/v0.40.0...v0.40.1)

View File

@@ -1,15 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.2
-->
## Improvements
* **[linstor] Refactor node-level RWX validation**: Refactored the node-level ReadWriteMany (RWX) validation logic in LINSTOR CSI. The validation has been moved to the CSI driver level with a custom linstor-csi image build, providing more reliable RWX volume handling and clearer error messages when RWX requirements cannot be satisfied ([**@kvaps**](https://github.com/kvaps) in #1856, #1857).
## Fixes
* **[linstor] Remove node-level RWX validation**: Removed the problematic node-level RWX validation that was causing issues with volume provisioning. The validation logic has been refactored and moved to a more appropriate location in the LINSTOR CSI driver ([**@kvaps**](https://github.com/kvaps) in #1851).
---
**Full Changelog**: [v0.40.1...v0.40.2](https://github.com/cozystack/cozystack/compare/v0.40.1...v0.40.2)

View File

@@ -1,15 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.3
-->
## Fixes
* **[apiserver] Fix Watch resourceVersion and bookmark handling**: Fixed issues with Watch API handling of resourceVersion and bookmarks, ensuring proper event streaming and state synchronization for API clients ([**@kvaps**](https://github.com/kvaps) in #1860).
## Dependencies
* **[cilium] Update Cilium to v1.18.6**: Updated Cilium CNI to v1.18.6 with security fixes and performance improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #1868, #1870).
---
**Full Changelog**: [v0.40.2...v0.40.3](https://github.com/cozystack/cozystack/compare/v0.40.2...v0.40.3)

View File

@@ -1,23 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.4
-->
## Improvements
* **[kubernetes] Increase default apiServer resourcesPreset to large**: Increased the default resource preset for kube-apiserver to `large` to ensure more reliable operation under higher workloads and prevent resource constraints ([**@kvaps**](https://github.com/kvaps) in #1875, #1882).
* **[kubernetes] Increase kube-apiserver startup probe threshold**: Increased the startup probe threshold for kube-apiserver to allow more time for the API server to become ready, especially in scenarios with slow storage or high load ([**@kvaps**](https://github.com/kvaps) in #1876, #1883).
* **[etcd] Increase probe thresholds for better recovery**: Increased etcd probe thresholds to provide more time for recovery operations, improving cluster resilience during network issues or temporary slowdowns ([**@kvaps**](https://github.com/kvaps) in #1874, #1878).
## Fixes
* **[dashboard] Fix view of loadbalancer IP in services window**: Fixed an issue where load balancer IP addresses were not displayed correctly in the services window of the dashboard ([**@IvanHunters**](https://github.com/IvanHunters) in #1884, #1887).
## Dependencies
* **Update Talos Linux to v1.11.6**: Updated Talos Linux to v1.11.6 with latest security patches and improvements ([**@kvaps**](https://github.com/kvaps) in #1879).
---
**Full Changelog**: [v0.40.3...v0.40.4](https://github.com/cozystack/cozystack/compare/v0.40.3...v0.40.4)

View File

@@ -1,63 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.0
-->
# Cozystack v0.41.0 — "MongoDB"
This release introduces MongoDB as a new managed application, expanding Cozystack's database offerings alongside existing PostgreSQL, MySQL, and Redis services. The release also includes storage improvements, Kubernetes stability enhancements, and updated documentation.
## Feature Highlights
### MongoDB Managed Application
Cozystack now includes MongoDB as a fully managed database service. Users can deploy production-ready MongoDB instances directly from the application catalog with minimal configuration.
Key capabilities:
- **Replica Set deployment**: Automatic configuration of MongoDB replica sets for high availability
- **Persistent storage**: Integration with Cozystack storage backends for reliable data persistence
- **Resource management**: Configurable CPU, memory, and storage resources
- **Monitoring integration**: Built-in metrics export for platform monitoring
Deploy MongoDB through the Cozystack dashboard or by using the standard application deployment workflow ([**@lexfrei**](https://github.com/lexfrei) in #1822, #1881).
## Improvements
* **[linstor] Update piraeus-server patches with critical fixes**: Backported critical patches to piraeus-server that address storage stability issues and improve DRBD resource handling. These patches fix edge cases in device management and ensure more reliable storage operations ([**@kvaps**](https://github.com/kvaps) in #1850, #1852).
* **[linstor] Refactor node-level RWX validation**: Refactored the node-level ReadWriteMany (RWX) validation logic in LINSTOR CSI. The validation has been moved to the CSI driver level with a custom linstor-csi image build, providing more reliable RWX volume handling and clearer error messages when RWX requirements cannot be satisfied ([**@kvaps**](https://github.com/kvaps) in #1856, #1857).
* **[kubernetes] Increase default apiServer resourcesPreset to large**: Increased the default resource preset for kube-apiserver to `large` to ensure more reliable operation under higher workloads and prevent resource constraints ([**@kvaps**](https://github.com/kvaps) in #1875, #1882).
* **[kubernetes] Increase kube-apiserver startup probe threshold**: Increased the startup probe threshold for kube-apiserver to allow more time for the API server to become ready, especially in scenarios with slow storage or high load ([**@kvaps**](https://github.com/kvaps) in #1876, #1883).
* **[etcd] Increase probe thresholds for better recovery**: Increased etcd probe thresholds to provide more time for recovery operations, improving cluster resilience during network issues or temporary slowdowns ([**@kvaps**](https://github.com/kvaps) in #1874, #1878).
## Fixes
* **[linstor] Remove node-level RWX validation**: Removed the problematic node-level RWX validation that was causing issues with volume provisioning. The validation logic has been refactored and moved to a more appropriate location in the LINSTOR CSI driver ([**@kvaps**](https://github.com/kvaps) in #1851).
* **[apiserver] Fix Watch resourceVersion and bookmark handling**: Fixed issues with Watch API handling of resourceVersion and bookmarks, ensuring proper event streaming and state synchronization for API clients ([**@kvaps**](https://github.com/kvaps) in #1860).
* **[dashboard] Fix view of loadbalancer IP in services window**: Fixed an issue where load balancer IP addresses were not displayed correctly in the services window of the dashboard ([**@IvanHunters**](https://github.com/IvanHunters) in #1884, #1887).
## Dependencies
* **[cilium] Update Cilium to v1.18.6**: Updated Cilium CNI to v1.18.6 with security fixes and performance improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #1868, #1870).
* **Update Talos Linux to v1.11.6**: Updated Talos Linux to v1.11.6 with latest security patches and improvements ([**@kvaps**](https://github.com/kvaps) in #1879).
## Documentation
* **[website] Add documentation for creating and managing cloned virtual machines**: Added comprehensive guide for VM cloning operations ([**@sircthulhu**](https://github.com/sircthulhu) in [cozystack/website#401](https://github.com/cozystack/website/pull/401)).
* **[website] Simplify NFS driver setup instructions**: Improved NFS driver setup documentation with clearer instructions ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#399](https://github.com/cozystack/website/pull/399)).
* **[website] Update Talos installation docs for Hetzner and Servers.com**: Updated installation documentation with improved instructions for Hetzner and Servers.com environments ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#395](https://github.com/cozystack/website/pull/395)).
* **[website] Add Hetzner RobotLB documentation**: Added documentation for configuring public IP with Hetzner RobotLB ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#394](https://github.com/cozystack/website/pull/394)).
* **[website] Add Hidora organization support details**: Added Hidora to the support page with organization details ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#397](https://github.com/cozystack/website/pull/397), [cozystack/website#398](https://github.com/cozystack/website/pull/398)).
---
**Full Changelog**: [v0.40.0...v0.41.0](https://github.com/cozystack/cozystack/compare/v0.40.0...v0.41.0)

View File

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.1
-->
## Improvements
* **[kubernetes] Add enum validation for IngressNginx exposeMethod**: Added enum validation for the `exposeMethod` field in IngressNginx configuration, preventing invalid values and improving user experience with clear valid options ([**@sircthulhu**](https://github.com/sircthulhu) in #1895, #1897).
---
**Full Changelog**: [v0.41.0...v0.41.1](https://github.com/cozystack/cozystack/compare/v0.41.0...v0.41.1)

View File

@@ -1,13 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.2
-->
## Improvements
* **[monitoring-agents] Set minReplicas to 1 for VPA for VMAgent**: Configured VPA (Vertical Pod Autoscaler) to maintain at least 1 replica for VMAgent, ensuring monitoring availability during scaling operations ([**@sircthulhu**](https://github.com/sircthulhu) in #1894, #1905).
* **[mongodb] Remove user-configurable images from MongoDB chart**: Removed user-configurable image options from the MongoDB chart to simplify configuration and ensure consistency with tested image versions ([**@kvaps**](https://github.com/kvaps) in #1901, #1904).
---
**Full Changelog**: [v0.41.1...v0.41.2](https://github.com/cozystack/cozystack/compare/v0.41.1...v0.41.2)

View File

@@ -1,15 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.3
-->
## Improvements
* **[kubernetes] Show Service and Ingress resources for Kubernetes app in dashboard**: Added visibility of Service and Ingress resources for Kubernetes applications in the dashboard, improving resource management and monitoring capabilities ([**@sircthulhu**](https://github.com/sircthulhu) in #1912, #1915).
## Fixes
* **[dashboard] Fix filtering on Pods tab for Service**: Fixed an issue where pod filtering was not working correctly on the Pods tab when viewing Services in the dashboard ([**@sircthulhu**](https://github.com/sircthulhu) in #1909, #1914).
---
**Full Changelog**: [v0.41.2...v0.41.3](https://github.com/cozystack/cozystack/compare/v0.41.2...v0.41.3)

View File

@@ -1,134 +0,0 @@
# Cozystack v1.0.0-alpha.1 — "Package-Based Architecture"
This alpha release introduces a fundamental architectural shift from HelmRelease bundles to Package-based deployment managed by the new cozystack-operator. It includes a comprehensive backup system with Velero integration, significant API changes that rename the core CRD, Flux sharding for improved tenant workload distribution, enhanced monitoring capabilities, and various improvements to virtual machines, tenants, and the build workflow.
> **⚠️ Alpha Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Breaking Changes
### API Rename: CozystackResourceDefinition → ApplicationDefinition
The `CozystackResourceDefinition` CRD has been renamed to `ApplicationDefinition` for better clarity and consistency. This change affects:
- All Go types and controller files
- CRD Helm chart renamed from `cozystack-resource-definition-crd` to `application-definition-crd`
- All cozyrds YAML manifests updated to use `kind: ApplicationDefinition`
A migration (v24) is included to handle the transition automatically.
### Package-Based Deployment
The platform now uses Package resources managed by cozystack-operator instead of HelmRelease bundles. Key changes:
- Restructured values.yaml with full configuration support (networking, publishing, authentication, scheduling, branding, resources)
- Added values-isp-full.yaml and values-isp-hosted.yaml for bundle variants
- Package resources replace old HelmRelease templates
- PackageSources moved from sources/ to templates/sources/
- Migration script `hack/migrate-to-version-1.0.sh` provided for converting ConfigMaps to Package resources
---
## Major Features and Improvements
### Cozystack Operator
A new operator has been introduced to manage Package and PackageSource resources, providing declarative package management for the platform:
* **[cozystack-operator] Introduce API objects: packages and packagesources**: Added new CRDs for declarative package management, defining the API for Package and PackageSource resources ([**@kvaps**](https://github.com/kvaps) in #1740).
* **[cozystack-operator] Introduce Cozystack-operator core logic**: Implemented core reconciliation logic for the operator, handling Package and PackageSource lifecycle management ([**@kvaps**](https://github.com/kvaps) in #1741).
* **[cozystack-operator] Add Package and PackageSource reconcilers**: Added controllers for Package and PackageSource resources with full reconciliation support ([**@kvaps**](https://github.com/kvaps) in #1755).
* **[cozystack-operator] Add deployment files**: Added Kubernetes deployment manifests for running cozystack-operator in the cluster ([**@kvaps**](https://github.com/kvaps) in #1761).
* **[platform] Add PackageSources for cozystack-operator**: Added PackageSource definitions for cozystack-operator integration ([**@kvaps**](https://github.com/kvaps) in #1760).
* **[cozypkg] Add tool for managing Package and PackageSources**: Added CLI tool for managing Package and PackageSource resources ([**@kvaps**](https://github.com/kvaps) in #1756).
### Backup System
Comprehensive backup functionality has been added with Velero integration for managing application backups:
* **[backups] Implement core backup Plan controller**: Core controller for managing backup schedules and plans, providing the foundation for backup orchestration ([**@lllamnyp**](https://github.com/lllamnyp) in #1640).
* **[backups] Build and deploy backup controller**: Deployment infrastructure for the backup controller, including container image builds and Kubernetes manifests ([**@lllamnyp**](https://github.com/lllamnyp) in #1685).
* **[backups] Scaffold a backup strategy API group**: Added API group for backup strategies, enabling pluggable backup implementations ([**@lllamnyp**](https://github.com/lllamnyp) in #1687).
* **[backups] Add indices to core backup resources**: Added indices to backup resources for improved query performance ([**@lllamnyp**](https://github.com/lllamnyp) in #1719).
* **[backups] Stub the Job backup strategy controller**: Added stub implementation for Job-based backup strategy ([**@lllamnyp**](https://github.com/lllamnyp) in #1720).
* **[backups] Implement Velero strategy controller**: Integration with Velero for backup operations, enabling enterprise-grade backup capabilities ([**@androndo**](https://github.com/androndo) in #1762).
* **[backups,dashboard] User-facing UI**: Dashboard interface for managing backups and backup jobs, providing visibility into backup status and history ([**@lllamnyp**](https://github.com/lllamnyp) in #1737).
### Platform Architecture
* **[platform] Migrate from HelmRelease bundles to Package-based deployment**: Replaced HelmRelease bundle system with Package resources managed by cozystack-operator. Includes restructured values.yaml with full configuration support and migration tooling ([**@kvaps**](https://github.com/kvaps) in #1816).
* **refactor(api): rename CozystackResourceDefinition to ApplicationDefinition**: Renamed CRD and all related types for better clarity and consistency. Updated all Go types, controllers, and 25+ YAML manifests ([**@kvaps**](https://github.com/kvaps) in #1864).
* **feat(flux): implement flux sharding for tenant HelmReleases**: Added Flux sharding support to distribute tenant HelmRelease reconciliation across multiple controllers, improving scalability in multi-tenant environments ([**@kvaps**](https://github.com/kvaps) in #1816).
* **refactor(installer): migrate installer to cozystack-operator**: Moved installer functionality to cozystack-operator for unified management ([**@kvaps**](https://github.com/kvaps) in #1816).
* **feat(api): add chartRef to ApplicationDefinition**: Added chartRef field to support ExternalArtifact references for flexible chart sourcing ([**@kvaps**](https://github.com/kvaps) in #1816).
* **feat(api): show only hash in version column for applications and modules**: Simplified version display in API responses for cleaner output ([**@kvaps**](https://github.com/kvaps) in #1816).
### Virtual Machines
* **[vm] Always expose VMs with a service**: Virtual machines are now always exposed with at least a ClusterIP service, ensuring they have in-cluster DNS names and can be accessed from other pods even without public IP addresses ([**@lllamnyp**](https://github.com/lllamnyp) in #1738, #1751).
### Monitoring
* **[monitoring] Add SLACK_SEVERITY_FILTER field and VMAgent for tenant monitoring**: Introduced the SLACK_SEVERITY_FILTER environment variable in the Alerta deployment to enable filtering of alert severities for Slack notifications based on the disabledSeverity configuration. Additionally, added a VMAgent resource template for scraping metrics within tenant namespaces, improving monitoring granularity and control ([**@IvanHunters**](https://github.com/IvanHunters) in #1712).
### Tenants
* **[tenant] Allow egress to parent ingress pods**: Updated tenant network policies to allow egress traffic to parent cluster ingress pods, enabling proper communication patterns between tenant namespaces and parent cluster ingress controllers ([**@lexfrei**](https://github.com/lexfrei) in #1765, #1776).
* **[tenant] Run cleanup job from system namespace**: Moved tenant cleanup job to run from system namespace, improving security and resource isolation for tenant cleanup operations ([**@lllamnyp**](https://github.com/lllamnyp) in #1774, #1777).
### System
* **[system] Add resource requests and limits to etcd-defrag**: Added resource requests and limits to etcd-defrag job to ensure proper resource allocation and prevent resource contention during etcd maintenance operations ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1785, #1786).
### Development and Build
* **feat(cozypkg): add cross-platform build targets with version injection**: Added cross-platform build targets (linux/amd64, linux/arm64, darwin/amd64, darwin/arm64) for cozypkg/cozyhr tool with automatic version injection from git tags ([**@kvaps**](https://github.com/kvaps) in #1862).
* **refactor: move scripts to hack directory**: Reorganized scripts to standard hack/ location following Kubernetes project conventions ([**@kvaps**](https://github.com/kvaps) in #1863).
## Fixes
* **fix(talos): skip rebuilding assets if files already exist**: Improved Talos package build process to avoid redundant asset rebuilds when files are already present, reducing build time ([**@kvaps**](https://github.com/kvaps)).
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring for virtual machines that are not running for extended periods ([**@lexfrei**](https://github.com/lexfrei) in #1770, #1775).
* **[backups] Fix malformed glob and split in template**: Fixed malformed glob pattern and split operation in backup template processing ([**@lllamnyp**](https://github.com/lllamnyp) in #1708).
## Documentation
* **[website] docs(storage): simplify NFS driver setup instructions**: Simplified NFS driver setup documentation with clearer instructions ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#399](https://github.com/cozystack/website/pull/399)).
* **[website] Add Hidora organization support details**: Added Hidora to the support page with organization details ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#397](https://github.com/cozystack/website/pull/397)).
* **[website] Update LinkedIn link for Hidora organization**: Updated LinkedIn link for Hidora organization on the support page ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#398](https://github.com/cozystack/website/pull/398)).
---
## Migration Guide
### From v0.38.x / v0.39.x to v1.0.0-alpha.1
1. **Backup your cluster** before upgrading
2. Run the migration script: `hack/migrate-to-version-1.0.sh`
3. The migration will:
- Convert ConfigMaps to Package resources
- Rename CozystackResourceDefinition to ApplicationDefinition
- Update HelmRelease references to use Package-based deployment
### Known Issues
- This is an alpha release; some features may be incomplete or change before stable release
- Migration script should be tested in a non-production environment first
---
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@androndo**](https://github.com/androndo)
* [**@IvanHunters**](https://github.com/IvanHunters)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@lllamnyp**](https://github.com/lllamnyp)
* [**@matthieu-robin**](https://github.com/matthieu-robin)
---
**Full Changelog**: [v0.38.0...v1.0.0-alpha.1](https://github.com/cozystack/cozystack/compare/v0.38.0...v1.0.0-alpha.1)
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-alpha.1
-->

View File

@@ -1,71 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-alpha.2
-->
> **⚠️ Alpha Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Major Features and Improvements
### New Applications
* **[apps] Add MongoDB managed application**: Added MongoDB as a new managed application, providing a fully managed MongoDB database with automatic scaling, backups, and high availability support ([**@lexfrei**](https://github.com/lexfrei) in #1822).
### Networking
* **[kilo] Introduce kilo**: Added Kilo WireGuard mesh networking support. Kilo provides secure WireGuard-based VPN mesh for connecting Kubernetes nodes across different networks and regions ([**@kvaps**](https://github.com/kvaps) in #1691).
* **[local-ccm] Add local-ccm package**: Added local cloud controller manager package for managing load balancer services in local/bare-metal environments without a cloud provider ([**@kvaps**](https://github.com/kvaps) in #1831).
### Platform
* **[platform] Add flux-plunger controller**: Added flux-plunger controller to automatically fix stuck HelmRelease errors by cleaning up failed resources and retrying reconciliation ([**@kvaps**](https://github.com/kvaps) in #1843).
* **[platform] Split telemetry between operator and controller**: Separated telemetry collection between cozystack-operator and cozystack-controller for better metrics isolation and monitoring capabilities ([**@kvaps**](https://github.com/kvaps) in #1869).
* **[platform] Remove cozystack.io/ui label**: Cleaned up deprecated `cozystack.io/ui` labels from platform components ([**@kvaps**](https://github.com/kvaps) in #1872).
## Improvements
* **[kubernetes] Increase default apiServer resourcesPreset to large**: Increased the default resource preset for kube-apiserver to `large` to ensure more reliable operation under higher workloads ([**@kvaps**](https://github.com/kvaps) in #1875).
* **[kubernetes] Increase kube-apiserver startup probe threshold**: Increased the startup probe threshold for kube-apiserver to allow more time for the API server to become ready ([**@kvaps**](https://github.com/kvaps) in #1876).
* **[etcd] Increase probe thresholds for better recovery**: Increased etcd probe thresholds to provide more time for recovery operations, improving cluster resilience ([**@kvaps**](https://github.com/kvaps) in #1874).
## Fixes
* **[apiserver] Fix Watch resourceVersion and bookmark handling**: Fixed issues with Watch API handling of resourceVersion and bookmarks, ensuring proper event streaming and state synchronization ([**@kvaps**](https://github.com/kvaps) in #1860).
* **[dashboard] Fix view of loadbalancer IP in services window**: Fixed an issue where load balancer IP addresses were not displayed correctly in the services window of the dashboard ([**@IvanHunters**](https://github.com/IvanHunters) in #1884).
## Dependencies
* **[cilium] Update Cilium to v1.18.6**: Updated Cilium CNI to v1.18.6 with security fixes and performance improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #1868).
* **Update Talos Linux to v1.12.1**: Updated Talos Linux to v1.12.1 with latest features, security patches, and improvements ([**@kvaps**](https://github.com/kvaps) in #1877).
## Documentation
* **[website] Add documentation for creating and managing cloned virtual machines**: Added comprehensive guide for VM cloning operations ([**@sircthulhu**](https://github.com/sircthulhu) in [cozystack/website#401](https://github.com/cozystack/website/pull/401)).
* **[website] Simplify NFS driver setup instructions**: Improved NFS driver setup documentation with clearer instructions ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#399](https://github.com/cozystack/website/pull/399)).
* **[website] Update Talos installation docs for Hetzner and Servers.com**: Updated installation documentation with improved instructions for Hetzner and Servers.com environments ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#395](https://github.com/cozystack/website/pull/395)).
* **[website] Add Hetzner RobotLB documentation**: Added documentation for configuring public IP with Hetzner RobotLB ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#394](https://github.com/cozystack/website/pull/394)).
* **[website] Add Hidora organization support details**: Added Hidora to the support page with organization details ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#397](https://github.com/cozystack/website/pull/397), [cozystack/website#398](https://github.com/cozystack/website/pull/398)).
---
## Contributors
* [**@IvanHunters**](https://github.com/IvanHunters)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@matthieu-robin**](https://github.com/matthieu-robin)
* [**@sircthulhu**](https://github.com/sircthulhu)
---
**Full Changelog**: [v1.0.0-alpha.1...v1.0.0-alpha.2](https://github.com/cozystack/cozystack/compare/v1.0.0-alpha.1...v1.0.0-alpha.2)

View File

@@ -1,99 +0,0 @@
# Enabling Hubble for Network Observability
Hubble is a network and security observability platform built on top of Cilium. It provides deep visibility into the communication and behavior of services in your Kubernetes cluster.
## Prerequisites
- Cozystack platform running with Cilium as the CNI
- Monitoring hub enabled for Grafana access
## Configuration
Hubble is disabled by default in Cozystack. To enable it, update the Cilium configuration.
### Enable Hubble
Edit the Cilium values in your platform configuration to enable Hubble:
```yaml
cilium:
hubble:
enabled: true
relay:
enabled: true
ui:
enabled: true
metrics:
enabled:
- dns
- drop
- tcp
- flow
- port-distribution
- icmp
- httpV2:exemplars=true;labelsContext=source_ip,source_namespace,source_workload,destination_ip,destination_namespace,destination_workload,traffic_direction
```
### Components
When Hubble is enabled, the following components become available:
- **Hubble Relay**: Aggregates flow data from all Cilium agents
- **Hubble UI**: Web-based interface for exploring network flows
- **Hubble Metrics**: Prometheus metrics for network observability
## Grafana Dashboards
Once Hubble is enabled and the monitoring hub is deployed, the following dashboards become available in Grafana under the `hubble` folder:
| Dashboard | Description |
|-----------|-------------|
| **Overview** | General Hubble metrics including processing statistics |
| **DNS Namespace** | DNS query and response metrics by namespace |
| **L7 HTTP Metrics** | HTTP layer 7 metrics by workload |
| **Network Overview** | Network flow overview by namespace |
### Accessing Dashboards
1. Navigate to Grafana via the monitoring hub
2. Browse to the `hubble` folder in the dashboard browser
3. Select a dashboard to view network observability data
## Metrics Available
Hubble exposes various metrics that can be queried in Grafana:
- `hubble_flows_processed_total`: Total number of flows processed
- `hubble_dns_queries_total`: DNS queries by type
- `hubble_dns_responses_total`: DNS responses by status
- `hubble_drop_total`: Dropped packets by reason
- `hubble_tcp_flags_total`: TCP connections by flag
- `hubble_http_requests_total`: HTTP requests by method and status
## Troubleshooting
### Verify Hubble Status
Check if Hubble is running:
```bash
kubectl get pods -n cozy-cilium -l k8s-app=hubble-relay
kubectl get pods -n cozy-cilium -l k8s-app=hubble-ui
```
### Check Metrics Endpoint
Verify Hubble metrics are being scraped:
```bash
kubectl port-forward -n cozy-cilium svc/hubble-metrics 9965:9965
curl http://localhost:9965/metrics
```
### Verify ServiceMonitor
Ensure the ServiceMonitor is created for Prometheus scraping:
```bash
kubectl get servicemonitor -n cozy-cilium
```

1
go.mod
View File

@@ -19,6 +19,7 @@ require (
github.com/spf13/cobra v1.9.1
github.com/vmware-tanzu/velero v1.17.1
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.34.1
k8s.io/apiextensions-apiserver v0.34.1
k8s.io/apimachinery v0.34.2

2
go.sum
View File

@@ -297,6 +297,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -56,26 +56,6 @@ kubectl get hr -A --no-headers | awk '$4 != "True"' | \
kubectl describe hr -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting packages..."
kubectl get packages -A > $REPORT_DIR/kubernetes/packages.txt 2>&1
kubectl get packages -A --no-headers | awk '$4 != "True"' | \
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/packages/$NAMESPACE/$NAME
mkdir -p $DIR
kubectl get package -n $NAMESPACE $NAME -o yaml > $DIR/package.yaml 2>&1
kubectl describe package -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting packagesources..."
kubectl get packagesources -A > $REPORT_DIR/kubernetes/packagesources.txt 2>&1
kubectl get packagesources -A --no-headers | awk '$4 != "True"' | \
while read NAMESPACE NAME _; do
DIR=$REPORT_DIR/kubernetes/packagesources/$NAMESPACE/$NAME
mkdir -p $DIR
kubectl get packagesource -n $NAMESPACE $NAME -o yaml > $DIR/packagesource.yaml 2>&1
kubectl describe packagesource -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting pods..."
kubectl get pod -A -o wide > $REPORT_DIR/kubernetes/pods.txt 2>&1
kubectl get pod -A --no-headers | awk '$4 !~ /Running|Succeeded|Completed/' |

View File

@@ -1,39 +0,0 @@
#!/usr/bin/env bats
@test "Create DB MongoDB" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MongoDB
metadata:
name: $name
namespace: tenant-test
spec:
external: false
size: 10Gi
replicas: 1
storageClass: ""
resourcesPreset: "nano"
users:
testuser:
password: xai7Wepo
databases:
testdb:
roles:
admin:
- testuser
backup:
enabled: false
EOF
sleep 5
# Wait for HelmRelease
kubectl -n tenant-test wait hr mongodb-$name --timeout=60s --for=condition=ready
# Wait for MongoDB service (port 27017)
timeout 120 sh -ec "until kubectl -n tenant-test get svc mongodb-$name-rs0 -o jsonpath='{.spec.ports[0].port}' | grep -q '27017'; do sleep 10; done"
# Wait for endpoints
timeout 180 sh -ec "until kubectl -n tenant-test get endpoints mongodb-$name-rs0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
# Wait for StatefulSet replicas
kubectl -n tenant-test wait statefulset.apps/mongodb-$name-rs0 --timeout=300s --for=jsonpath='{.status.replicas}'=1
# Cleanup
kubectl -n tenant-test delete mongodbs.apps.cozystack.io $name
}

View File

@@ -1,52 +1,35 @@
#!/usr/bin/env bats
@test "Required installer assets exist" {
if [ ! -f _out/assets/cozystack-crds.yaml ]; then
echo "Missing: _out/assets/cozystack-crds.yaml" >&2
exit 1
fi
if [ ! -f _out/assets/cozystack-operator.yaml ]; then
echo "Missing: _out/assets/cozystack-operator.yaml" >&2
if [ ! -f _out/assets/cozystack-installer.yaml ]; then
echo "Missing: _out/assets/cozystack-installer.yaml" >&2
exit 1
fi
}
@test "Install Cozystack" {
# Create namespace
# Create namespace & configmap required by installer
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
kubectl create configmap cozystack -n cozy-system \
--from-literal=bundle-name=paas-full \
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
--from-literal=ipv4-pod-gateway=10.244.0.1 \
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
--from-literal=root-host=example.org \
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
--dry-run=client -o yaml | kubectl apply -f -
# Apply installer manifests (CRDs + operator)
kubectl apply -f _out/assets/cozystack-crds.yaml
kubectl apply -f _out/assets/cozystack-operator.yaml
# Apply installer manifests from file
kubectl apply -f _out/assets/cozystack-installer.yaml
# Wait for the operator deployment to become available
kubectl wait deployment/cozystack-operator -n cozy-system --timeout=1m --for=condition=Available
# Create platform Package with isp-full variant
kubectl apply -f - <<EOF
apiVersion: cozystack.io/v1alpha1
kind: Package
metadata:
name: cozystack.cozystack-platform
spec:
variant: isp-full
components:
platform:
values:
networking:
podCIDR: "10.244.0.0/16"
podGateway: "10.244.0.1"
serviceCIDR: "10.96.0.0/16"
joinCIDR: "100.64.0.0/16"
publishing:
host: "example.org"
apiServerEndpoint: "https://192.168.123.10:6443"
EOF
# Wait for the installer deployment to become available
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
# Wait until HelmReleases appear & reconcile them
timeout 180 sh -ec 'until [ $(kubectl get hr -A --no-headers 2>/dev/null | wc -l) -gt 10 ]; do sleep 1; done'
timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
# Fail the test if any HelmRelease is not Ready
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
@@ -159,7 +142,7 @@ EOF
# Expose Cozystack services through ingress
kubectl patch package cozystack.cozystack-platform --type merge -p '{"spec":{"components":{"platform":{"values":{"publishing":{"exposedServices":["api","dashboard","cdi-uploadproxy","vm-exportproxy","keycloak"]}}}}}}'
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
# NGINX ingress controller
timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
@@ -186,7 +169,7 @@ EOF
}
@test "Keycloak OIDC stack is healthy" {
kubectl patch package cozystack.cozystack-platform --type merge -p '{"spec":{"components":{"platform":{"values":{"authentication":{"oidc":{"enabled":true}}}}}}}'
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready

View File

@@ -136,28 +136,25 @@ machine:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
#docker.io:
# endpoints:
# - https://dockerio.nexus.aenix.org
#cr.fluentbit.io:
# endpoints:
# - https://fluentbit.nexus.aenix.org
#docker-registry3.mariadb.com:
# endpoints:
# - https://mariadb.nexus.aenix.org
#gcr.io:
# endpoints:
# - https://gcr.nexus.aenix.org
#ghcr.io:
# endpoints:
# - https://ghcr.nexus.aenix.org
#quay.io:
# endpoints:
# - https://quay.nexus.aenix.org
#registry.k8s.io:
# endpoints:
# - https://k8s.nexus.aenix.org
- https://dockerio.nexus.aenix.org
cr.fluentbit.io:
endpoints:
- https://fluentbit.nexus.aenix.org
docker-registry3.mariadb.com:
endpoints:
- https://mariadb.nexus.aenix.org
gcr.io:
endpoints:
- https://gcr.nexus.aenix.org
ghcr.io:
endpoints:
- https://ghcr.nexus.aenix.org
quay.io:
endpoints:
- https://quay.nexus.aenix.org
registry.k8s.io:
endpoints:
- https://k8s.nexus.aenix.org
files:
- content: |
[plugins]
@@ -239,10 +236,7 @@ EOF
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait until etcd is healthy
if ! timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'; then
talosctl dmesg -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 || true
exit 1
fi
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
# Retrieve kubeconfig

View File

@@ -1,179 +0,0 @@
#!/bin/bash
# Migration script from Cozystack ConfigMaps to Package-based configuration
# This script converts cozystack, cozystack-branding, and cozystack-scheduling
# ConfigMaps into a Package resource with the new values structure.
# Abort immediately on any command failure.
set -e
NAMESPACE="cozy-system"
echo "============================="
echo " Cozystack Migration to v1.0 "
echo "============================="
echo ""
echo "This script will convert existing ConfigMaps to a Package resource."
echo ""
# Check if kubectl is available
if ! command -v kubectl &> /dev/null; then
echo "Error: kubectl is not installed or not in PATH"
exit 1
fi
# Check if jq is available
if ! command -v jq &> /dev/null; then
echo "Error: jq is not installed or not in PATH"
exit 1
fi
# Check if we can access the cluster
if ! kubectl get namespace "$NAMESPACE" &> /dev/null; then
echo "Error: Cannot access namespace $NAMESPACE"
exit 1
fi
# Read ConfigMap cozystack
# Missing ConfigMaps degrade to "{}" so the jq defaults below kick in
# instead of the script failing under `set -e`.
echo "Reading ConfigMap cozystack..."
COZYSTACK_CM=$(kubectl get configmap -n "$NAMESPACE" cozystack -o json 2>/dev/null || echo "{}")
# Read ConfigMap cozystack-branding
echo "Reading ConfigMap cozystack-branding..."
BRANDING_CM=$(kubectl get configmap -n "$NAMESPACE" cozystack-branding -o json 2>/dev/null || echo "{}")
# Read ConfigMap cozystack-scheduling
echo "Reading ConfigMap cozystack-scheduling..."
SCHEDULING_CM=$(kubectl get configmap -n "$NAMESPACE" cozystack-scheduling -o json 2>/dev/null || echo "{}")
# Extract values from cozystack ConfigMap
# jq's `//` alternative operator supplies a default when a key is absent/null.
CLUSTER_DOMAIN=$(echo "$COZYSTACK_CM" | jq -r '.data["cluster-domain"] // "cozy.local"')
ROOT_HOST=$(echo "$COZYSTACK_CM" | jq -r '.data["root-host"] // "example.org"')
API_SERVER_ENDPOINT=$(echo "$COZYSTACK_CM" | jq -r '.data["api-server-endpoint"] // ""')
OIDC_ENABLED=$(echo "$COZYSTACK_CM" | jq -r '.data["oidc-enabled"] // "false"')
KEYCLOAK_REDIRECTS=$(echo "$COZYSTACK_CM" | jq -r '.data["extra-keycloak-redirect-uri-for-dashboard"] // ""' )
TELEMETRY_ENABLED=$(echo "$COZYSTACK_CM" | jq -r '.data["telemetry-enabled"] // "true"')
BUNDLE_NAME=$(echo "$COZYSTACK_CM" | jq -r '.data["bundle-name"] // "paas-full"')
# Network configuration
POD_CIDR=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-pod-cidr"] // "10.244.0.0/16"')
POD_GATEWAY=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-pod-gateway"] // "10.244.0.1"')
SVC_CIDR=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-svc-cidr"] // "10.96.0.0/16"')
JOIN_CIDR=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-join-cidr"] // "100.64.0.0/16"')
EXTERNAL_IPS=$(echo "$COZYSTACK_CM" | jq -r '.data["expose-external-ips"] // ""')
# Convert the comma-separated IP list into an indented YAML list; the awk
# BEGIN{print} emits a leading newline so the list nests under externalIPs.
# NOTE(review): the `sed 's/,/\n/g'` relies on GNU sed interpreting \n in the
# replacement — confirm if this must run on BSD/macOS sed.
if [ -z "$EXTERNAL_IPS" ]; then
EXTERNAL_IPS="[]"
else
EXTERNAL_IPS=$(echo "$EXTERNAL_IPS" | sed 's/,/\n/g' | awk 'BEGIN{print}{print " - "$0}')
fi
# Determine bundle type
# full bundles manage system components themselves; anything else is hosted.
case "$BUNDLE_NAME" in
paas-full|distro-full)
SYSTEM_ENABLED="true"
SYSTEM_TYPE="full"
;;
paas-hosted|distro-hosted)
SYSTEM_ENABLED="false"
SYSTEM_TYPE="hosted"
;;
*)
SYSTEM_ENABLED="false"
SYSTEM_TYPE="hosted"
;;
esac
# Update bundle naming
# Rename the legacy "paas-*" bundle family to the new "isp-*" variant names.
BUNDLE_NAME=$(echo "$BUNDLE_NAME" | sed 's/paas/isp/')
# Extract branding if available
# Each data entry becomes a `key: "value"` line; awk indents them so they
# nest as a YAML mapping under the `branding:` key in the heredoc below.
BRANDING=$(echo "$BRANDING_CM" | jq -r '.data // {} | to_entries[] | "\(.key): \"\(.value)\""')
if [ -z "$BRANDING" ]; then
BRANDING="{}"
else
BRANDING=$(echo "$BRANDING" | awk 'BEGIN{print}{print " " $0}')
fi
# Extract scheduling if available
SCHEDULING_CONSTRAINTS=$(echo "$SCHEDULING_CM" | jq -r '.data["globalAppTopologySpreadConstraints"] // ""')
if [ -z "$SCHEDULING_CONSTRAINTS" ]; then
SCHEDULING_CONSTRAINTS='""'
else
SCHEDULING_CONSTRAINTS=$(echo "$SCHEDULING_CONSTRAINTS" | awk 'BEGIN{print}{print " " $0}')
fi
echo ""
echo "Extracted configuration:"
echo " Cluster Domain: $CLUSTER_DOMAIN"
echo " Root Host: $ROOT_HOST"
echo " API Server Endpoint: $API_SERVER_ENDPOINT"
echo " OIDC Enabled: $OIDC_ENABLED"
echo " Bundle Name: $BUNDLE_NAME"
echo " System Enabled: $SYSTEM_ENABLED"
echo " System Type: $SYSTEM_TYPE"
echo ""
# Generate Package YAML
# Unquoted heredoc: all $VAR references are expanded by the shell here.
PACKAGE_YAML=$(cat <<EOF
apiVersion: cozystack.io/v1alpha1
kind: Package
metadata:
name: cozystack.cozystack-platform
namespace: $NAMESPACE
spec:
variant: $BUNDLE_NAME
components:
platform:
values:
bundles:
system:
enabled: $SYSTEM_ENABLED
type: "$SYSTEM_TYPE"
iaas:
enabled: true
paas:
enabled: true
naas:
enabled: true
networking:
clusterDomain: "$CLUSTER_DOMAIN"
podCIDR: "$POD_CIDR"
podGateway: "$POD_GATEWAY"
serviceCIDR: "$SVC_CIDR"
joinCIDR: "$JOIN_CIDR"
publishing:
host: "$ROOT_HOST"
apiServerEndpoint: "$API_SERVER_ENDPOINT"
externalIPs: $EXTERNAL_IPS
authentication:
oidc:
enabled: $OIDC_ENABLED
keycloakExtraRedirectUri: "$KEYCLOAK_REDIRECTS"
scheduling:
globalAppTopologySpreadConstraints: $SCHEDULING_CONSTRAINTS
branding: $BRANDING
EOF
)
echo "Generated Package resource:"
echo "---"
echo "$PACKAGE_YAML"
echo "..."
echo ""
# Interactive confirmation before mutating the cluster (bash-specific read -n).
read -p "Do you want to apply this Package? (y/N) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo "Applying Package..."
echo "$PACKAGE_YAML" | kubectl apply -f -
echo ""
echo "Package applied successfully!"
echo ""
echo "You can now safely delete the old ConfigMaps after verifying the migration:"
echo " kubectl delete configmap -n $NAMESPACE cozystack cozystack-branding cozystack-scheduling"
else
echo "Package not applied. You can save the output above and apply it manually."
fi
echo ""
echo "All done!"

View File

@@ -26,7 +26,7 @@ CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"
TMPDIR=$(mktemp -d)
OPERATOR_CRDDIR=packages/core/installer/definitions
COZY_CONTROLLER_CRDDIR=packages/system/cozystack-controller/definitions
COZY_RD_CRDDIR=packages/system/application-definition-crd/definition
COZY_RD_CRDDIR=packages/system/cozystack-resource-definition-crd/definition
BACKUPS_CORE_CRDDIR=packages/system/backup-controller/definitions
BACKUPSTRATEGY_CRDDIR=packages/system/backupstrategy-controller/definitions
@@ -66,8 +66,8 @@ $CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:arti
mv ${TMPDIR}/cozystack.io_packages.yaml ${OPERATOR_CRDDIR}/cozystack.io_packages.yaml
mv ${TMPDIR}/cozystack.io_packagesources.yaml ${OPERATOR_CRDDIR}/cozystack.io_packagesources.yaml
mv ${TMPDIR}/cozystack.io_applicationdefinitions.yaml \
${COZY_RD_CRDDIR}/cozystack.io_applicationdefinitions.yaml
mv ${TMPDIR}/cozystack.io_cozystackresourcedefinitions.yaml \
${COZY_RD_CRDDIR}/cozystack.io_cozystackresourcedefinitions.yaml
mv ${TMPDIR}/backups.cozystack.io*.yaml ${BACKUPS_CORE_CRDDIR}/
mv ${TMPDIR}/strategy.backups.cozystack.io*.yaml ${BACKUPSTRATEGY_CRDDIR}/

View File

@@ -65,21 +65,12 @@ case "$PWD" in
*"/extra/"*) SOURCE_NAME="cozystack-extra" ;;
esac
# Determine variant from PackageSource file
# Look for packages/core/platform/sources/${NAME}-application.yaml
PACKAGE_SOURCE_FILE="../../core/platform/sources/${NAME}-application.yaml"
if [[ -f "$PACKAGE_SOURCE_FILE" ]]; then
VARIANT="$(yq -r '.spec.variants[0].name // "default"' "$PACKAGE_SOURCE_FILE")"
else
VARIANT="default"
fi
# If file doesn't exist, create a minimal skeleton
OUT="${OUT:-$CRD_DIR/$NAME.yaml}"
if [[ ! -f "$OUT" ]]; then
cat >"$OUT" <<EOF
apiVersion: cozystack.io/v1alpha1
kind: ApplicationDefinition
kind: CozystackResourceDefinition
metadata:
name: ${NAME}
spec: {}
@@ -95,7 +86,6 @@ fi
export DESCRIPTION="$DESC"
export ICON_B64="$ICON_B64"
export SOURCE_NAME="$SOURCE_NAME"
export VARIANT="$VARIANT"
export SCHEMA_JSON_MIN="$(jq -c . "$SCHEMA_JSON")"
# Generate keysOrder from values.yaml
@@ -127,20 +117,21 @@ export KEYS_ORDER="$(
# Update only necessary fields in-place
# - openAPISchema is loaded from file as a multi-line string (block scalar)
# - prefix = "<name>-" or "" for extra
# - chartRef points to ExternalArtifact created by Package controller
# - labels ensure cozystack.io/ui: "true"
# - prefix = "<name>-"
# - sourceRef derived from directory (apps|extra)
yq -i '
.apiVersion = (.apiVersion // "cozystack.io/v1alpha1") |
.kind = (.kind // "ApplicationDefinition") |
.kind = (.kind // "CozystackResourceDefinition") |
.metadata.name = strenv(RES_NAME) |
.spec.application.openAPISchema = strenv(SCHEMA_JSON_MIN) |
(.spec.application.openAPISchema style="literal") |
.spec.release.prefix = (strenv(PREFIX)) |
del(.spec.release.labels."cozystack.io/application") |
del(.spec.release.labels."cozystack.io/ui") |
.spec.release.chartRef.kind = "ExternalArtifact" |
.spec.release.chartRef.name = ("cozystack-" + strenv(RES_NAME) + "-application-" + strenv(VARIANT) + "-" + strenv(RES_NAME)) |
.spec.release.chartRef.namespace = "cozy-system" |
.spec.release.labels."cozystack.io/ui" = "true" |
.spec.release.chart.name = strenv(RES_NAME) |
.spec.release.chart.sourceRef.kind = "HelmRepository" |
.spec.release.chart.sourceRef.name = strenv(SOURCE_NAME) |
.spec.release.chart.sourceRef.namespace = "cozy-public" |
.spec.dashboard.description = strenv(DESCRIPTION) |
.spec.dashboard.icon = strenv(ICON_B64) |
.spec.dashboard.keysOrder = env(KEYS_ORDER)

View File

@@ -3,14 +3,9 @@ set -xe
version=${VERSION:-$(git describe --tags)}
gh release upload --clobber $version _out/assets/cozystack-crds.yaml
gh release upload --clobber $version _out/assets/cozystack-operator.yaml
gh release upload --clobber $version _out/assets/cozystack-operator-generic.yaml
gh release upload --clobber $version _out/assets/cozystack-operator-hosted.yaml
gh release upload --clobber $version _out/assets/cozystack-installer.yaml
gh release upload --clobber $version _out/assets/metal-amd64.iso
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
gh release upload --clobber $version _out/assets/kernel-amd64
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
gh release upload --clobber $version _out/assets/cozypkg-*.tar.gz
gh release upload --clobber $version _out/assets/cozypkg-checksums.txt

View File

@@ -1,75 +0,0 @@
package backupcontroller
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
const (
// DefaultApplicationAPIGroup is the default API group for applications
// when not specified in ApplicationRef or ApplicationSelector.
// Deprecated: Use backupsv1alpha1.DefaultApplicationAPIGroup instead.
// Kept as an alias so existing references in this package keep compiling.
DefaultApplicationAPIGroup = backupsv1alpha1.DefaultApplicationAPIGroup
)
// NormalizeApplicationRef sets the default apiGroup to DefaultApplicationAPIGroup if it's not specified.
// Deprecated: Use backupsv1alpha1.NormalizeApplicationRef instead.
//
// Thin forwarding wrapper retained for backward compatibility; it delegates
// directly to the API-package helper without any additional behavior.
func NormalizeApplicationRef(ref corev1.TypedLocalObjectReference) corev1.TypedLocalObjectReference {
return backupsv1alpha1.NormalizeApplicationRef(ref)
}
// ResolvedBackupConfig contains the resolved strategy and storage configuration
// from a BackupClass.
type ResolvedBackupConfig struct {
// StrategyRef is the strategy selected from the BackupClass entry that
// matched the application's apiGroup and kind.
StrategyRef corev1.TypedLocalObjectReference
// Parameters carries the matched strategy's free-form key/value settings.
Parameters map[string]string
}
// ResolveBackupClass resolves a BackupClass and finds the matching strategy for the given application.
// It normalizes the applicationRef's apiGroup (defaults to apps.cozystack.io if not specified)
// and matches it against the strategies in the BackupClass.
func ResolveBackupClass(
	ctx context.Context,
	c client.Client,
	backupClassName string,
	applicationRef corev1.TypedLocalObjectReference,
) (*ResolvedBackupConfig, error) {
	// Default the apiGroup up front so the matching loop below is uniform.
	normalizedRef := NormalizeApplicationRef(applicationRef)

	// Fetch the cluster-scoped BackupClass by name.
	var backupClass backupsv1alpha1.BackupClass
	if err := c.Get(ctx, client.ObjectKey{Name: backupClassName}, &backupClass); err != nil {
		return nil, fmt.Errorf("failed to get BackupClass %s: %w", backupClassName, err)
	}

	// Group used for matching; normalization should have set it, but fall
	// back to the default defensively if the pointer is still nil.
	wantGroup := backupsv1alpha1.DefaultApplicationAPIGroup
	if normalizedRef.APIGroup != nil {
		wantGroup = *normalizedRef.APIGroup
	}

	// Return the first strategy whose application selector matches both the
	// (defaulted) apiGroup and the kind of the target application.
	for _, strategy := range backupClass.Spec.Strategies {
		// A selector without an apiGroup (or with an empty one) implicitly
		// targets the default application group.
		group := backupsv1alpha1.DefaultApplicationAPIGroup
		if g := strategy.Application.APIGroup; g != nil && *g != "" {
			group = *g
		}
		if group == wantGroup && strategy.Application.Kind == normalizedRef.Kind {
			return &ResolvedBackupConfig{
				StrategyRef: strategy.StrategyRef,
				Parameters:  strategy.Parameters,
			}, nil
		}
	}

	return nil, fmt.Errorf("no matching strategy found in BackupClass %s for application %s/%s",
		backupClassName, wantGroup, normalizedRef.Kind)
}

View File

@@ -1,375 +0,0 @@
package backupcontroller
import (
"context"
"testing"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
// TestNormalizeApplicationRef verifies apiGroup defaulting on application
// references: a missing, nil, or empty apiGroup becomes
// DefaultApplicationAPIGroup, while an explicitly set group (default or
// custom) is preserved unchanged.
func TestNormalizeApplicationRef(t *testing.T) {
// Table of input refs and the refs expected after normalization.
tests := []struct {
name string
input corev1.TypedLocalObjectReference
expected corev1.TypedLocalObjectReference
}{
{
name: "apiGroup not specified - should default to apps.cozystack.io",
input: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
{
name: "apiGroup is nil - should default to apps.cozystack.io",
input: corev1.TypedLocalObjectReference{
APIGroup: nil,
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
{
name: "apiGroup is empty string - should default to apps.cozystack.io",
input: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(""),
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
{
name: "apiGroup is explicitly set - should keep it",
input: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "CustomApp",
Name: "custom-app",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "CustomApp",
Name: "custom-app",
},
},
{
name: "apiGroup is apps.cozystack.io - should keep it",
input: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := NormalizeApplicationRef(tt.input)
// Semantic equality compares the *string APIGroup by value, not pointer.
if !apiequality.Semantic.DeepEqual(result, tt.expected) {
t.Errorf("NormalizeApplicationRef() = %v, want %v", result, tt.expected)
}
})
}
}
// TestResolveBackupClass exercises strategy resolution against a fake client:
// successful matches (with and without explicit apiGroups), a missing
// BackupClass, a kind with no matching strategy, and an apiGroup mismatch.
func TestResolveBackupClass(t *testing.T) {
// Register both API groups so the fake client can store BackupClass objects.
scheme := runtime.NewScheme()
err := backupsv1alpha1.AddToScheme(scheme)
if err != nil {
t.Fatalf("Failed to add backupsv1alpha1 to scheme: %v", err)
}
err = strategyv1alpha1.AddToScheme(scheme)
if err != nil {
t.Fatalf("Failed to add strategyv1alpha1 to scheme: %v", err)
}
tests := []struct {
name string
backupClass *backupsv1alpha1.BackupClass
applicationRef corev1.TypedLocalObjectReference
backupClassName string
wantErr bool
expectedStrategyRef *corev1.TypedLocalObjectReference
expectedParams map[string]string
}{
{
name: "successful resolution - matches VirtualMachine strategy",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "VirtualMachine",
},
Parameters: map[string]string{
"backupStorageLocationName": "default",
},
},
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-mysql",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "MySQL",
},
Parameters: map[string]string{
"backupStorageLocationName": "mysql-storage",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "velero",
wantErr: false,
expectedStrategyRef: &corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
expectedParams: map[string]string{
"backupStorageLocationName": "default",
},
},
{
name: "successful resolution - matches MySQL strategy with explicit apiGroup",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-mysql",
},
Application: backupsv1alpha1.ApplicationSelector{
APIGroup: stringPtr("apps.cozystack.io"),
Kind: "MySQL",
},
Parameters: map[string]string{
"backupStorageLocationName": "mysql-storage",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("apps.cozystack.io"),
Kind: "MySQL",
Name: "mysql1",
},
backupClassName: "velero",
wantErr: false,
expectedStrategyRef: &corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-mysql",
},
expectedParams: map[string]string{
"backupStorageLocationName": "mysql-storage",
},
},
{
name: "successful resolution - applicationRef without apiGroup defaults correctly",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "VirtualMachine",
},
Parameters: map[string]string{
"backupStorageLocationName": "default",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
// No APIGroup specified
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "velero",
wantErr: false,
expectedStrategyRef: &corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
expectedParams: map[string]string{
"backupStorageLocationName": "default",
},
},
{
name: "error - BackupClass not found",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{},
},
},
applicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "nonexistent",
wantErr: true,
},
{
name: "error - no matching strategy found",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "VirtualMachine",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
Kind: "PostgreSQL", // Not in BackupClass
Name: "pg1",
},
backupClassName: "velero",
wantErr: true,
},
{
name: "error - apiGroup mismatch",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "VirtualMachine",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("apps.cozystack.io"), // Different apiGroup
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "velero",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
// Seed the fake client with the BackupClass for this case.
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(tt.backupClass).
Build()
resolved, err := ResolveBackupClass(ctx, fakeClient, tt.backupClassName, tt.applicationRef)
if tt.wantErr {
if err == nil {
t.Errorf("ResolveBackupClass() expected error but got none")
}
return
}
if err != nil {
t.Errorf("ResolveBackupClass() error = %v, wantErr %v", err, tt.wantErr)
return
}
if resolved == nil {
t.Errorf("ResolveBackupClass() returned nil, expected ResolvedBackupConfig")
return
}
// Verify strategy ref using apimachinery equality
if tt.expectedStrategyRef != nil {
if !apiequality.Semantic.DeepEqual(resolved.StrategyRef, *tt.expectedStrategyRef) {
t.Errorf("ResolveBackupClass() StrategyRef = %v, want %v", resolved.StrategyRef, *tt.expectedStrategyRef)
}
}
// Verify parameters using apimachinery equality
if tt.expectedParams != nil {
if !apiequality.Semantic.DeepEqual(resolved.Parameters, tt.expectedParams) {
t.Errorf("ResolveBackupClass() Parameters = %v, want %v", resolved.Parameters, tt.expectedParams)
}
}
})
}
}
// stringPtr returns a pointer to a copy of the given string, convenient for
// populating optional *string fields in test fixtures.
func stringPtr(s string) *string {
	v := s
	return &v
}

View File

@@ -45,42 +45,29 @@ func (r *BackupJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, err
}
// Normalize ApplicationRef (default apiGroup if not specified)
normalizedAppRef := NormalizeApplicationRef(j.Spec.ApplicationRef)
// Resolve BackupClass
resolved, err := ResolveBackupClass(ctx, r.Client, j.Spec.BackupClassName, normalizedAppRef)
if err != nil {
logger.Error(err, "failed to resolve BackupClass", "backupClassName", j.Spec.BackupClassName)
return ctrl.Result{}, err
}
strategyRef := resolved.StrategyRef
// Validate strategyRef
if strategyRef.APIGroup == nil {
logger.V(1).Info("BackupJob resolved StrategyRef has nil APIGroup, skipping", "backupjob", j.Name)
if j.Spec.StrategyRef.APIGroup == nil {
logger.V(1).Info("BackupJob has nil StrategyRef.APIGroup, skipping", "backupjob", j.Name)
return ctrl.Result{}, nil
}
if *strategyRef.APIGroup != strategyv1alpha1.GroupVersion.Group {
logger.V(1).Info("BackupJob resolved StrategyRef.APIGroup doesn't match, skipping",
if *j.Spec.StrategyRef.APIGroup != strategyv1alpha1.GroupVersion.Group {
logger.V(1).Info("BackupJob StrategyRef.APIGroup doesn't match, skipping",
"backupjob", j.Name,
"expected", strategyv1alpha1.GroupVersion.Group,
"got", *strategyRef.APIGroup)
"got", *j.Spec.StrategyRef.APIGroup)
return ctrl.Result{}, nil
}
logger.Info("processing BackupJob", "backupjob", j.Name, "strategyKind", strategyRef.Kind, "backupClassName", j.Spec.BackupClassName)
switch strategyRef.Kind {
logger.Info("processing BackupJob", "backupjob", j.Name, "strategyKind", j.Spec.StrategyRef.Kind)
switch j.Spec.StrategyRef.Kind {
case strategyv1alpha1.JobStrategyKind:
return r.reconcileJob(ctx, j, resolved)
return r.reconcileJob(ctx, j)
case strategyv1alpha1.VeleroStrategyKind:
return r.reconcileVelero(ctx, j, resolved)
return r.reconcileVelero(ctx, j)
default:
logger.V(1).Info("BackupJob resolved StrategyRef.Kind not supported, skipping",
logger.V(1).Info("BackupJob StrategyRef.Kind not supported, skipping",
"backupjob", j.Name,
"kind", strategyRef.Kind,
"kind", j.Spec.StrategyRef.Kind,
"supported", []string{strategyv1alpha1.JobStrategyKind, strategyv1alpha1.VeleroStrategyKind})
return ctrl.Result{}, nil
}
@@ -88,17 +75,6 @@ func (r *BackupJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *BackupJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
// index BackupJob by backupClassName for efficient lookups when BackupClass changes
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &backupsv1alpha1.BackupJob{}, "spec.backupClassName", func(obj client.Object) []string {
job := obj.(*backupsv1alpha1.BackupJob)
if job.Spec.BackupClassName == "" {
return []string{}
}
return []string{job.Spec.BackupClassName}
}); err != nil {
return err
}
cfg := mgr.GetConfig()
var err error
if r.Interface, err = dynamic.NewForConfig(cfg); err != nil {

View File

@@ -10,9 +10,6 @@ import (
)
func BackupJob(p *backupsv1alpha1.Plan, scheduledFor time.Time) *backupsv1alpha1.BackupJob {
// Normalize ApplicationRef (default apiGroup if not specified)
appRef := backupsv1alpha1.NormalizeApplicationRef(*p.Spec.ApplicationRef.DeepCopy())
job := &backupsv1alpha1.BackupJob{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", p.Name, scheduledFor.Unix()/60),
@@ -22,8 +19,9 @@ func BackupJob(p *backupsv1alpha1.Plan, scheduledFor time.Time) *backupsv1alpha1
PlanRef: &corev1.LocalObjectReference{
Name: p.Name,
},
ApplicationRef: appRef,
BackupClassName: p.Spec.BackupClassName,
ApplicationRef: *p.Spec.ApplicationRef.DeepCopy(),
StorageRef: *p.Spec.StorageRef.DeepCopy(),
StrategyRef: *p.Spec.StrategyRef.DeepCopy(),
},
}
return job

View File

@@ -1,167 +0,0 @@
package factory
import (
"testing"
"time"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestBackupJob(t *testing.T) {
tests := []struct {
name string
plan *backupsv1alpha1.Plan
scheduled time.Time
validate func(*testing.T, *backupsv1alpha1.BackupJob)
}{
{
name: "creates BackupJob with BackupClassName",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Name == "" {
t.Error("BackupJob name should be set")
}
if job.Namespace != "default" {
t.Errorf("BackupJob namespace = %v, want default", job.Namespace)
}
if job.Spec.BackupClassName != "velero" {
t.Errorf("BackupJob BackupClassName = %v, want velero", job.Spec.BackupClassName)
}
if job.Spec.ApplicationRef.Kind != "VirtualMachine" {
t.Errorf("BackupJob ApplicationRef.Kind = %v, want VirtualMachine", job.Spec.ApplicationRef.Kind)
}
if job.Spec.ApplicationRef.Name != "vm1" {
t.Errorf("BackupJob ApplicationRef.Name = %v, want vm1", job.Spec.ApplicationRef.Name)
}
if job.Spec.PlanRef == nil || job.Spec.PlanRef.Name != "test-plan" {
t.Errorf("BackupJob PlanRef = %v, want {Name: test-plan}", job.Spec.PlanRef)
}
},
},
{
name: "normalizes ApplicationRef apiGroup",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
// No APIGroup specified
Kind: "MySQL",
Name: "mysql1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Spec.ApplicationRef.APIGroup == nil {
t.Error("BackupJob ApplicationRef.APIGroup should be set (normalized)")
return
}
if *job.Spec.ApplicationRef.APIGroup != "apps.cozystack.io" {
t.Errorf("BackupJob ApplicationRef.APIGroup = %v, want apps.cozystack.io", *job.Spec.ApplicationRef.APIGroup)
}
},
},
{
name: "preserves explicit ApplicationRef apiGroup",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "CustomApp",
Name: "custom1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Spec.ApplicationRef.APIGroup == nil {
t.Error("BackupJob ApplicationRef.APIGroup should be preserved")
return
}
if *job.Spec.ApplicationRef.APIGroup != "custom.api.group.io" {
t.Errorf("BackupJob ApplicationRef.APIGroup = %v, want custom.api.group.io", *job.Spec.ApplicationRef.APIGroup)
}
},
},
{
name: "generates unique job name based on timestamp",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Name == "" {
t.Error("BackupJob name should be generated")
}
// Name should start with plan name
if len(job.Name) < len("test-plan") {
t.Errorf("BackupJob name = %v, should start with test-plan", job.Name)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
job := BackupJob(tt.plan, tt.scheduled)
if job == nil {
t.Fatal("BackupJob() returned nil")
}
tt.validate(t, job)
})
}
}
func stringPtr(s string) *string {
return &s
}

View File

@@ -9,8 +9,7 @@ import (
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
func (r *BackupJobReconciler) reconcileJob(ctx context.Context, j *backupsv1alpha1.BackupJob, resolved *ResolvedBackupConfig) (ctrl.Result, error) {
func (r *BackupJobReconciler) reconcileJob(ctx context.Context, j *backupsv1alpha1.BackupJob) (ctrl.Result, error) {
_ = log.FromContext(ctx)
_ = resolved // Use resolved BackupClass parameters when implementing your job strategy
return ctrl.Result{}, nil
}

View File

@@ -20,8 +20,8 @@ import (
)
const (
minRequeueDelay = 30 * time.Second
startingDeadline = 300 * time.Second
minRequeueDelay = 30 * time.Second
startingDeadlineSeconds = 300 * time.Second
)
// PlanReconciler reconciles a Plan object
@@ -45,7 +45,7 @@ func (r *PlanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
return ctrl.Result{}, err
}
tCheck := time.Now().Add(-startingDeadline)
tCheck := time.Now().Add(-startingDeadlineSeconds)
sch, err := cron.ParseStandard(p.Spec.Schedule.Cron)
if err != nil {
errWrapped := fmt.Errorf("could not parse cron %s: %w", p.Spec.Schedule.Cron, err)
@@ -78,7 +78,7 @@ func (r *PlanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
tNext := sch.Next(tCheck)
if time.Now().Before(tNext) {
return ctrl.Result{RequeueAfter: time.Until(tNext)}, nil
return ctrl.Result{RequeueAfter: tNext.Sub(time.Now())}, nil
}
job := factory.BackupJob(p, tNext)
@@ -88,12 +88,12 @@ func (r *PlanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
if err := r.Create(ctx, job); err != nil {
if apierrors.IsAlreadyExists(err) {
return ctrl.Result{RequeueAfter: startingDeadline}, nil
return ctrl.Result{RequeueAfter: startingDeadlineSeconds}, nil
}
return ctrl.Result{}, err
}
return ctrl.Result{RequeueAfter: startingDeadline}, nil
return ctrl.Result{RequeueAfter: startingDeadlineSeconds}, nil
}
// SetupWithManager registers our controller with the Manager and sets up watches.

View File

@@ -2,13 +2,16 @@ package backupcontroller
import (
"context"
"encoding/json"
"fmt"
"reflect"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -46,6 +49,19 @@ type S3Credentials struct {
AccessSecretKey string
}
// bucketInfo represents the structure of BucketInfo stored in the secret
type bucketInfo struct {
Spec struct {
BucketName string `json:"bucketName"`
SecretS3 struct {
Endpoint string `json:"endpoint"`
Region string `json:"region"`
AccessKeyID string `json:"accessKeyID"`
AccessSecretKey string `json:"accessSecretKey"`
} `json:"secretS3"`
} `json:"spec"`
}
const (
defaultRequeueAfter = 5 * time.Second
defaultActiveJobPollingInterval = defaultRequeueAfter
@@ -54,11 +70,15 @@ const (
virtualMachinePrefix = "virtual-machine-"
)
func storageS3SecretName(namespace, backupJobName string) string {
return fmt.Sprintf("backup-%s-%s-s3-credentials", namespace, backupJobName)
}
func boolPtr(b bool) *bool {
return &b
}
func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1alpha1.BackupJob, resolved *ResolvedBackupConfig) (ctrl.Result, error) {
func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1alpha1.BackupJob) (ctrl.Result, error) {
logger := getLogger(ctx)
logger.Debug("reconciling Velero strategy", "backupjob", j.Name, "phase", j.Status.Phase)
@@ -85,13 +105,13 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
logger.Debug("BackupJob already started", "startedAt", j.Status.StartedAt, "phase", j.Status.Phase)
}
// Step 2: Resolve inputs - Read Strategy from resolved config
logger.Debug("fetching Velero strategy", "strategyName", resolved.StrategyRef.Name)
// Step 2: Resolve inputs - Read Strategy, Storage, Application, optionally Plan
logger.Debug("fetching Velero strategy", "strategyName", j.Spec.StrategyRef.Name)
veleroStrategy := &strategyv1alpha1.Velero{}
if err := r.Get(ctx, client.ObjectKey{Name: resolved.StrategyRef.Name}, veleroStrategy); err != nil {
if err := r.Get(ctx, client.ObjectKey{Name: j.Spec.StrategyRef.Name}, veleroStrategy); err != nil {
if errors.IsNotFound(err) {
logger.Error(err, "Velero strategy not found", "strategyName", resolved.StrategyRef.Name)
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("Velero strategy not found: %s", resolved.StrategyRef.Name))
logger.Error(err, "Velero strategy not found", "strategyName", j.Spec.StrategyRef.Name)
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("Velero strategy not found: %s", j.Spec.StrategyRef.Name))
}
logger.Error(err, "failed to get Velero strategy")
return ctrl.Result{}, err
@@ -123,7 +143,7 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
if len(veleroBackupList.Items) == 0 {
// Create Velero Backup
logger.Debug("Velero Backup not found, creating new one")
if err := r.createVeleroBackup(ctx, j, veleroStrategy, resolved); err != nil {
if err := r.createVeleroBackup(ctx, j, veleroStrategy); err != nil {
logger.Error(err, "failed to create Velero Backup")
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("failed to create Velero Backup: %v", err))
}
@@ -175,7 +195,7 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
if phase == "Completed" {
// Check if we already created the Backup resource
if j.Status.BackupRef == nil {
backup, err := r.createBackupResource(ctx, j, veleroBackup, resolved)
backup, err := r.createBackupResource(ctx, j, veleroBackup)
if err != nil {
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("failed to create Backup resource: %v", err))
}
@@ -206,6 +226,268 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
}
// resolveBucketStorageRef discovers S3 credentials from a Bucket storageRef
// It follows this flow:
// 1. Get the Bucket resource (apps.cozystack.io/v1alpha1)
// 2. Find the BucketAccess that references this bucket
// 3. Get the secret from BucketAccess.spec.credentialsSecretName
// 4. Decode BucketInfo from secret.data.BucketInfo and extract S3 credentials
func (r *BackupJobReconciler) resolveBucketStorageRef(ctx context.Context, storageRef corev1.TypedLocalObjectReference, namespace string) (*S3Credentials, error) {
logger := getLogger(ctx)
// Step 1: Get the Bucket resource
bucket := &unstructured.Unstructured{}
bucket.SetGroupVersionKind(schema.GroupVersionKind{
Group: *storageRef.APIGroup,
Version: "v1alpha1",
Kind: storageRef.Kind,
})
if *storageRef.APIGroup != "apps.cozystack.io" {
return nil, fmt.Errorf("Unsupported storage APIGroup: %v, expected apps.cozystack.io", storageRef.APIGroup)
}
bucketKey := client.ObjectKey{Namespace: namespace, Name: storageRef.Name}
if err := r.Get(ctx, bucketKey, bucket); err != nil {
return nil, fmt.Errorf("failed to get Bucket %s: %w", storageRef.Name, err)
}
// Step 2: Determine the bucket claim name
// For apps.cozystack.io Bucket, the BucketClaim name is typically the same as the Bucket name
// or follows a pattern. Based on the templates, it's usually the Release.Name which equals the Bucket name
bucketName := storageRef.Name
// Step 3: Get BucketAccess by name (assuming BucketAccess name matches bucketName)
bucketAccess := &unstructured.Unstructured{}
bucketAccess.SetGroupVersionKind(schema.GroupVersionKind{
Group: "objectstorage.k8s.io",
Version: "v1alpha1",
Kind: "BucketAccess",
})
bucketAccessKey := client.ObjectKey{Name: "bucket-" + bucketName, Namespace: namespace}
if err := r.Get(ctx, bucketAccessKey, bucketAccess); err != nil {
return nil, fmt.Errorf("failed to get BucketAccess %s in namespace %s: %w", bucketName, namespace, err)
}
// Step 4: Get the secret name from BucketAccess
secretName, found, err := unstructured.NestedString(bucketAccess.Object, "spec", "credentialsSecretName")
if err != nil {
return nil, fmt.Errorf("failed to get credentialsSecretName from BucketAccess: %w", err)
}
if !found || secretName == "" {
return nil, fmt.Errorf("credentialsSecretName not found in BucketAccess %s", bucketAccessKey.Name)
}
// Step 5: Get the secret
secret := &corev1.Secret{}
secretKey := client.ObjectKey{Namespace: namespace, Name: secretName}
if err := r.Get(ctx, secretKey, secret); err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", secretName, err)
}
// Step 6: Decode BucketInfo from secret.data.BucketInfo
bucketInfoData, found := secret.Data["BucketInfo"]
if !found {
return nil, fmt.Errorf("BucketInfo key not found in secret %s", secretName)
}
// Parse JSON value
var info bucketInfo
if err := json.Unmarshal(bucketInfoData, &info); err != nil {
return nil, fmt.Errorf("failed to unmarshal BucketInfo from secret %s: %w", secretName, err)
}
// Step 7: Extract and return S3 credentials
creds := &S3Credentials{
BucketName: info.Spec.BucketName,
Endpoint: info.Spec.SecretS3.Endpoint,
Region: info.Spec.SecretS3.Region,
AccessKeyID: info.Spec.SecretS3.AccessKeyID,
AccessSecretKey: info.Spec.SecretS3.AccessSecretKey,
}
logger.Debug("resolved S3 credentials from Bucket storageRef",
"bucket", storageRef.Name,
"bucketName", creds.BucketName,
"endpoint", creds.Endpoint)
return creds, nil
}
// createS3CredsForVelero creates or updates a Kubernetes Secret containing
// Velero S3 credentials in the format expected by Velero's cloud-credentials plugin.
func (r *BackupJobReconciler) createS3CredsForVelero(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, creds *S3Credentials) error {
logger := getLogger(ctx)
secretName := storageS3SecretName(backupJob.Namespace, backupJob.Name)
secretNamespace := veleroNamespace
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: secretNamespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"cloud": fmt.Sprintf(`[default]
aws_access_key_id=%s
aws_secret_access_key=%s
services = seaweed-s3
[services seaweed-s3]
s3 =
endpoint_url = %s
`, creds.AccessKeyID, creds.AccessSecretKey, creds.Endpoint),
},
}
foundSecret := &corev1.Secret{}
secretKey := client.ObjectKey{Name: secretName, Namespace: secretNamespace}
err := r.Get(ctx, secretKey, foundSecret)
if err != nil && errors.IsNotFound(err) {
// Create the Secret
if err := r.Create(ctx, secret); err != nil {
r.Recorder.Event(backupJob, corev1.EventTypeWarning, "SecretCreationFailed",
fmt.Sprintf("Failed to create Velero credentials secret %s/%s: %v", secretNamespace, secretName, err))
return fmt.Errorf("failed to create Velero credentials secret: %w", err)
}
logger.Debug("created Velero credentials secret", "secret", secretName)
r.Recorder.Event(backupJob, corev1.EventTypeNormal, "SecretCreated",
fmt.Sprintf("Created Velero credentials secret %s/%s", secretNamespace, secretName))
} else if err == nil {
// Update if necessary - only update if the secret data has actually changed
// Compare the new secret data with existing secret data
existingData := foundSecret.Data
if existingData == nil {
existingData = make(map[string][]byte)
}
newData := make(map[string][]byte)
for k, v := range secret.StringData {
newData[k] = []byte(v)
}
// Check if data has changed
dataChanged := false
if len(existingData) != len(newData) {
dataChanged = true
} else {
for k, newVal := range newData {
existingVal, exists := existingData[k]
if !exists || !reflect.DeepEqual(existingVal, newVal) {
dataChanged = true
break
}
}
}
if dataChanged {
foundSecret.StringData = secret.StringData
foundSecret.Data = nil // Clear .Data so .StringData will be used
if err := r.Update(ctx, foundSecret); err != nil {
r.Recorder.Event(backupJob, corev1.EventTypeWarning, "SecretUpdateFailed",
fmt.Sprintf("Failed to update Velero credentials secret %s/%s: %v", secretNamespace, secretName, err))
return fmt.Errorf("failed to update Velero credentials secret: %w", err)
}
logger.Debug("updated Velero credentials secret", "secret", secretName)
r.Recorder.Event(backupJob, corev1.EventTypeNormal, "SecretUpdated",
fmt.Sprintf("Updated Velero credentials secret %s/%s", secretNamespace, secretName))
} else {
logger.Debug("Velero credentials secret data unchanged, skipping update", "secret", secretName)
}
} else if err != nil {
return fmt.Errorf("error checking for existing Velero credentials secret: %w", err)
}
return nil
}
// createBackupStorageLocation creates or updates a Velero BackupStorageLocation resource.
func (r *BackupJobReconciler) createBackupStorageLocation(ctx context.Context, bsl *velerov1.BackupStorageLocation) error {
logger := getLogger(ctx)
foundBSL := &velerov1.BackupStorageLocation{}
bslKey := client.ObjectKey{Name: bsl.Name, Namespace: bsl.Namespace}
err := r.Get(ctx, bslKey, foundBSL)
if err != nil && errors.IsNotFound(err) {
// Create the BackupStorageLocation
if err := r.Create(ctx, bsl); err != nil {
return fmt.Errorf("failed to create BackupStorageLocation: %w", err)
}
logger.Debug("created BackupStorageLocation", "name", bsl.Name, "namespace", bsl.Namespace)
} else if err == nil {
// Update if necessary - use patch to avoid conflicts with Velero's status updates
// Only update if the spec has actually changed
if !reflect.DeepEqual(foundBSL.Spec, bsl.Spec) {
// Retry on conflict since Velero may be updating status concurrently
for i := 0; i < 3; i++ {
if err := r.Get(ctx, bslKey, foundBSL); err != nil {
return fmt.Errorf("failed to get BackupStorageLocation for update: %w", err)
}
foundBSL.Spec = bsl.Spec
if err := r.Update(ctx, foundBSL); err != nil {
if errors.IsConflict(err) && i < 2 {
logger.Debug("conflict updating BackupStorageLocation, retrying", "attempt", i+1)
time.Sleep(100 * time.Millisecond)
continue
}
return fmt.Errorf("failed to update BackupStorageLocation: %w", err)
}
logger.Debug("updated BackupStorageLocation", "name", bsl.Name, "namespace", bsl.Namespace)
return nil
}
} else {
logger.Debug("BackupStorageLocation spec unchanged, skipping update", "name", bsl.Name, "namespace", bsl.Namespace)
}
} else if err != nil {
return fmt.Errorf("error checking for existing BackupStorageLocation: %w", err)
}
return nil
}
// createVolumeSnapshotLocation creates or updates a Velero VolumeSnapshotLocation resource.
func (r *BackupJobReconciler) createVolumeSnapshotLocation(ctx context.Context, vsl *velerov1.VolumeSnapshotLocation) error {
logger := getLogger(ctx)
foundVSL := &velerov1.VolumeSnapshotLocation{}
vslKey := client.ObjectKey{Name: vsl.Name, Namespace: vsl.Namespace}
err := r.Get(ctx, vslKey, foundVSL)
if err != nil && errors.IsNotFound(err) {
// Create the VolumeSnapshotLocation
if err := r.Create(ctx, vsl); err != nil {
return fmt.Errorf("failed to create VolumeSnapshotLocation: %w", err)
}
logger.Debug("created VolumeSnapshotLocation", "name", vsl.Name, "namespace", vsl.Namespace)
} else if err == nil {
// Update if necessary - only update if the spec has actually changed
if !reflect.DeepEqual(foundVSL.Spec, vsl.Spec) {
// Retry on conflict since Velero may be updating status concurrently
for i := 0; i < 3; i++ {
if err := r.Get(ctx, vslKey, foundVSL); err != nil {
return fmt.Errorf("failed to get VolumeSnapshotLocation for update: %w", err)
}
foundVSL.Spec = vsl.Spec
if err := r.Update(ctx, foundVSL); err != nil {
if errors.IsConflict(err) && i < 2 {
logger.Debug("conflict updating VolumeSnapshotLocation, retrying", "attempt", i+1)
time.Sleep(100 * time.Millisecond)
continue
}
return fmt.Errorf("failed to update VolumeSnapshotLocation: %w", err)
}
logger.Debug("updated VolumeSnapshotLocation", "name", vsl.Name, "namespace", vsl.Namespace)
return nil
}
} else {
logger.Debug("VolumeSnapshotLocation spec unchanged, skipping update", "name", vsl.Name, "namespace", vsl.Namespace)
}
} else if err != nil {
return fmt.Errorf("error checking for existing VolumeSnapshotLocation: %w", err)
}
return nil
}
func (r *BackupJobReconciler) markBackupJobFailed(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, message string) (ctrl.Result, error) {
logger := getLogger(ctx)
now := metav1.Now()
@@ -230,7 +512,7 @@ func (r *BackupJobReconciler) markBackupJobFailed(ctx context.Context, backupJob
return ctrl.Result{}, nil
}
func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, strategy *strategyv1alpha1.Velero, resolved *ResolvedBackupConfig) error {
func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, strategy *strategyv1alpha1.Velero) error {
logger := getLogger(ctx)
logger.Debug("createVeleroBackup called", "strategy", strategy.Name)
@@ -247,12 +529,7 @@ func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob
return err
}
templateContext := map[string]interface{}{
"Application": app.Object,
"Parameters": resolved.Parameters,
}
veleroBackupSpec, err := template.Template(&strategy.Spec.Template.Spec, templateContext)
veleroBackupSpec, err := template.Template(&strategy.Spec.Template.Spec, app.Object)
if err != nil {
return err
}
@@ -284,8 +561,13 @@ func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob
return nil
}
func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, veleroBackup *velerov1.Backup, resolved *ResolvedBackupConfig) (*backupsv1alpha1.Backup, error) {
func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, veleroBackup *velerov1.Backup) (*backupsv1alpha1.Backup, error) {
logger := getLogger(ctx)
// Extract artifact information from Velero Backup
// Create a basic artifact referencing the Velero backup
artifact := &backupsv1alpha1.BackupArtifact{
URI: fmt.Sprintf("velero://%s/%s", backupJob.Namespace, veleroBackup.Name),
}
// Get takenAt from Velero Backup creation timestamp or status
takenAt := metav1.Now()
@@ -301,14 +583,9 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
"velero.io/backup-namespace": veleroBackup.Namespace,
}
// Create a basic artifact referencing the Velero backup
artifact := &backupsv1alpha1.BackupArtifact{
URI: fmt.Sprintf("velero://%s/%s", veleroBackup.Namespace, veleroBackup.Name),
}
backup := &backupsv1alpha1.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: backupJob.Name,
Name: fmt.Sprintf("%s", backupJob.Name),
Namespace: backupJob.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
@@ -322,13 +599,13 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
},
Spec: backupsv1alpha1.BackupSpec{
ApplicationRef: backupJob.Spec.ApplicationRef,
StrategyRef: resolved.StrategyRef,
StorageRef: backupJob.Spec.StorageRef,
StrategyRef: backupJob.Spec.StrategyRef,
TakenAt: takenAt,
DriverMetadata: driverMetadata,
},
Status: backupsv1alpha1.BackupStatus{
Phase: backupsv1alpha1.BackupPhaseReady,
Artifact: artifact,
Phase: backupsv1alpha1.BackupPhaseReady,
},
}
@@ -336,6 +613,10 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
backup.Spec.PlanRef = backupJob.Spec.PlanRef
}
if artifact != nil {
backup.Status.Artifact = artifact
}
if err := r.Create(ctx, backup); err != nil {
logger.Error(err, "failed to create Backup resource")
return nil, err

View File

@@ -1,208 +0,0 @@
package backupcontroller
import (
"context"
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
clientfake "sigs.k8s.io/controller-runtime/pkg/client/fake"
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
// mockRESTMapper implements meta.RESTMapper for testing
type mockRESTMapper struct {
mapping *meta.RESTMapping
}
func (m *mockRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
return m.mapping, nil
}
func (m *mockRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
return []*meta.RESTMapping{m.mapping}, nil
}
func (m *mockRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
return m.mapping.GroupVersionKind, nil
}
func (m *mockRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
return []schema.GroupVersionKind{m.mapping.GroupVersionKind}, nil
}
func (m *mockRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
return m.mapping.Resource, nil
}
func (m *mockRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
return []schema.GroupVersionResource{m.mapping.Resource}, nil
}
func (m *mockRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
return resource, nil
}
func TestCreateVeleroBackup_TemplateContext(t *testing.T) {
// Setup scheme
testScheme := runtime.NewScheme()
_ = scheme.AddToScheme(testScheme)
_ = backupsv1alpha1.AddToScheme(testScheme)
_ = strategyv1alpha1.AddToScheme(testScheme)
_ = velerov1.AddToScheme(testScheme)
// Create test application (VirtualMachine-like object)
testApp := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-vm",
Namespace: "default",
Labels: map[string]string{
"apps.cozystack.io/application.Kind": "VirtualMachine",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-container",
Image: "test-image:latest",
},
},
},
}
// Create dynamic client with the test application
dynamicClient := dynamicfake.NewSimpleDynamicClient(testScheme, testApp)
// Create REST mapping
mapping := &meta.RESTMapping{
Resource: schema.GroupVersionResource{
Group: "",
Version: "v1",
Resource: "pods",
},
GroupVersionKind: schema.GroupVersionKind{
Group: "",
Version: "v1",
Kind: "Pod",
},
Scope: meta.RESTScopeNamespace,
}
// Create BackupJob
backupJob := &backupsv1alpha1.BackupJob{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup-job",
Namespace: "default",
},
Spec: backupsv1alpha1.BackupJobSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(""),
Kind: "Pod",
Name: "test-vm",
},
BackupClassName: "velero",
},
}
// Create Velero strategy with template that uses Application and Parameters
veleroStrategy := &strategyv1alpha1.Velero{
ObjectMeta: metav1.ObjectMeta{
Name: "velero-strategy",
},
Spec: strategyv1alpha1.VeleroSpec{
Template: strategyv1alpha1.VeleroTemplate{
Spec: velerov1.BackupSpec{
// Use template variables to verify context is passed correctly
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "{{ .Application.metadata.name }}",
},
},
// Use Parameters in template
StorageLocation: "{{ .Parameters.backupStorageLocationName }}",
},
},
},
}
// Create ResolvedBackupConfig with parameters
resolved := &ResolvedBackupConfig{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy",
},
Parameters: map[string]string{
"backupStorageLocationName": "default-storage",
},
}
// Create fake client for controller
fakeClient := clientfake.NewClientBuilder().
WithScheme(testScheme).
WithObjects(backupJob, veleroStrategy).
Build()
// Create reconciler with fake event recorder
reconciler := &BackupJobReconciler{
Client: fakeClient,
Interface: dynamicClient,
RESTMapper: &mockRESTMapper{mapping: mapping},
Scheme: testScheme,
Recorder: record.NewFakeRecorder(10),
}
// Create context with logger
ctx := context.Background()
// Call createVeleroBackup
err := reconciler.createVeleroBackup(ctx, backupJob, veleroStrategy, resolved)
if err != nil {
t.Fatalf("createVeleroBackup() error = %v", err)
}
// Verify that the template was executed correctly by checking the created Velero Backup
// The template should have replaced {{ .Application.metadata.name }} with "test-vm"
// and {{ .Parameters.backupStorageLocationName }} with "default-storage"
// Get the created Velero Backup
veleroBackups := &velerov1.BackupList{}
err = fakeClient.List(ctx, veleroBackups, client.InNamespace(veleroNamespace))
if err != nil {
t.Fatalf("Failed to list Velero Backups: %v", err)
}
if len(veleroBackups.Items) == 0 {
t.Fatal("Expected Velero Backup to be created, but none found")
}
veleroBackup := veleroBackups.Items[0]
// Verify template context was used correctly:
// 1. Application.metadata.name should be replaced with "test-vm"
if veleroBackup.Spec.LabelSelector == nil {
t.Fatal("Expected LabelSelector to be set by template")
}
if appLabel, ok := veleroBackup.Spec.LabelSelector.MatchLabels["app"]; ok {
if appLabel != "test-vm" {
t.Errorf("Template context Application.metadata.name not applied correctly. Expected 'test-vm', got '%s'", appLabel)
}
} else {
t.Error("Template context Application.metadata.name not found in label selector")
}
// 2. Parameters.backupStorageLocationName should be replaced with "default-storage"
if veleroBackup.Spec.StorageLocation != "default-storage" {
t.Errorf("Template context Parameters.backupStorageLocationName not applied correctly. Expected 'default-storage', got '%s'", veleroBackup.Spec.StorageLocation)
}
}

View File

@@ -23,7 +23,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type ApplicationDefinitionReconciler struct {
type CozystackResourceDefinitionReconciler struct {
client.Client
Scheme *runtime.Scheme
@@ -36,21 +36,21 @@ type ApplicationDefinitionReconciler struct {
CozystackAPIKind string
}
func (r *ApplicationDefinitionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
func (r *CozystackResourceDefinitionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// Only handle debounced restart logic
// HelmRelease reconciliation is handled by ApplicationDefinitionHelmReconciler
// HelmRelease reconciliation is handled by CozystackResourceDefinitionHelmReconciler
return r.debouncedRestart(ctx)
}
func (r *ApplicationDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *CozystackResourceDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Debounce == 0 {
r.Debounce = 5 * time.Second
}
return ctrl.NewControllerManagedBy(mgr).
Named("applicationdefinition-controller").
Named("cozystackresource-controller").
Watches(
&cozyv1alpha1.ApplicationDefinition{},
&cozyv1alpha1.CozystackResourceDefinition{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
r.mu.Lock()
r.lastEvent = time.Now()
@@ -66,22 +66,22 @@ func (r *ApplicationDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) err
Complete(r)
}
type appDefHashView struct {
Name string `json:"name"`
Spec cozyv1alpha1.ApplicationDefinitionSpec `json:"spec"`
type crdHashView struct {
Name string `json:"name"`
Spec cozyv1alpha1.CozystackResourceDefinitionSpec `json:"spec"`
}
func (r *ApplicationDefinitionReconciler) computeConfigHash(ctx context.Context) (string, error) {
list := &cozyv1alpha1.ApplicationDefinitionList{}
func (r *CozystackResourceDefinitionReconciler) computeConfigHash(ctx context.Context) (string, error) {
list := &cozyv1alpha1.CozystackResourceDefinitionList{}
if err := r.List(ctx, list); err != nil {
return "", err
}
slices.SortFunc(list.Items, sortAppDefs)
slices.SortFunc(list.Items, sortCozyRDs)
views := make([]appDefHashView, 0, len(list.Items))
views := make([]crdHashView, 0, len(list.Items))
for i := range list.Items {
views = append(views, appDefHashView{
views = append(views, crdHashView{
Name: list.Items[i].Name,
Spec: list.Items[i].Spec,
})
@@ -94,7 +94,7 @@ func (r *ApplicationDefinitionReconciler) computeConfigHash(ctx context.Context)
return hex.EncodeToString(sum[:]), nil
}
func (r *ApplicationDefinitionReconciler) debouncedRestart(ctx context.Context) (ctrl.Result, error) {
func (r *CozystackResourceDefinitionReconciler) debouncedRestart(ctx context.Context) (ctrl.Result, error) {
logger := log.FromContext(ctx)
r.mu.Lock()
@@ -132,7 +132,7 @@ func (r *ApplicationDefinitionReconciler) debouncedRestart(ctx context.Context)
r.mu.Lock()
r.lastHandled = le
r.mu.Unlock()
logger.Info("No changes in ApplicationDefinition config; skipping restart", "hash", newHash)
logger.Info("No changes in CRD config; skipping restart", "hash", newHash)
return ctrl.Result{}, nil
}
@@ -151,7 +151,7 @@ func (r *ApplicationDefinitionReconciler) debouncedRestart(ctx context.Context)
return ctrl.Result{}, nil
}
func (r *ApplicationDefinitionReconciler) getWorkload(
func (r *CozystackResourceDefinitionReconciler) getWorkload(
ctx context.Context,
key types.NamespacedName,
) (tpl *corev1.PodTemplateSpec, obj client.Object, patch client.Patch, err error) {
@@ -178,7 +178,7 @@ func (r *ApplicationDefinitionReconciler) getWorkload(
return tpl, obj, patch, nil
}
func sortAppDefs(a, b cozyv1alpha1.ApplicationDefinition) int {
func sortCozyRDs(a, b cozyv1alpha1.CozystackResourceDefinition) int {
if a.Name == b.Name {
return 0
}

View File

@@ -14,65 +14,66 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
)
// +kubebuilder:rbac:groups=cozystack.io,resources=applicationdefinitions,verbs=get;list;watch
// +kubebuilder:rbac:groups=cozystack.io,resources=cozystackresourcedefinitions,verbs=get;list;watch
// +kubebuilder:rbac:groups=helm.toolkit.fluxcd.io,resources=helmreleases,verbs=get;list;watch;update;patch
// ApplicationDefinitionHelmReconciler reconciles ApplicationDefinitions
// and updates related HelmReleases when an ApplicationDefinition changes.
// CozystackResourceDefinitionHelmReconciler reconciles CozystackResourceDefinitions
// and updates related HelmReleases when a CozyRD changes.
// This controller does NOT watch HelmReleases to avoid mutual reconciliation storms
// with Flux's helm-controller.
type ApplicationDefinitionHelmReconciler struct {
type CozystackResourceDefinitionHelmReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *ApplicationDefinitionHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
func (r *CozystackResourceDefinitionHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
// Get the ApplicationDefinition that triggered this reconciliation
appDef := &cozyv1alpha1.ApplicationDefinition{}
if err := r.Get(ctx, req.NamespacedName, appDef); err != nil {
logger.Error(err, "failed to get ApplicationDefinition", "name", req.Name)
// Get the CozystackResourceDefinition that triggered this reconciliation
crd := &cozyv1alpha1.CozystackResourceDefinition{}
if err := r.Get(ctx, req.NamespacedName, crd); err != nil {
logger.Error(err, "failed to get CozystackResourceDefinition", "name", req.Name)
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Update HelmReleases related to this specific ApplicationDefinition
if err := r.updateHelmReleasesForAppDef(ctx, appDef); err != nil {
logger.Error(err, "failed to update HelmReleases for ApplicationDefinition", "appDef", appDef.Name)
// Update HelmReleases related to this specific CozyRD
if err := r.updateHelmReleasesForCRD(ctx, crd); err != nil {
logger.Error(err, "failed to update HelmReleases for CRD", "crd", crd.Name)
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *ApplicationDefinitionHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *CozystackResourceDefinitionHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Named("applicationdefinition-helm-reconciler").
For(&cozyv1alpha1.ApplicationDefinition{}).
Named("cozystackresourcedefinition-helm-reconciler").
For(&cozyv1alpha1.CozystackResourceDefinition{}).
Complete(r)
}
// updateHelmReleasesForAppDef updates all HelmReleases that match the application labels from ApplicationDefinition
func (r *ApplicationDefinitionHelmReconciler) updateHelmReleasesForAppDef(ctx context.Context, appDef *cozyv1alpha1.ApplicationDefinition) error {
// updateHelmReleasesForCRD updates all HelmReleases that match the application labels from CozystackResourceDefinition
func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleasesForCRD(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
logger := log.FromContext(ctx)
// Use application labels to find HelmReleases
// Labels: apps.cozystack.io/application.kind and apps.cozystack.io/application.group
applicationKind := appDef.Spec.Application.Kind
applicationKind := crd.Spec.Application.Kind
// Validate that applicationKind is non-empty
if applicationKind == "" {
logger.V(4).Info("Skipping HelmRelease update: Application.Kind is empty", "appDef", appDef.Name)
logger.V(4).Info("Skipping HelmRelease update: Application.Kind is empty", "crd", crd.Name)
return nil
}
applicationGroup := "apps.cozystack.io" // All applications use this group
// Build label selector for HelmReleases
// Only reconcile HelmReleases with apps.cozystack.io/application.* labels
// Only reconcile HelmReleases with cozystack.io/ui=true label
labelSelector := client.MatchingLabels{
"apps.cozystack.io/application.kind": applicationKind,
"apps.cozystack.io/application.group": applicationGroup,
"cozystack.io/ui": "true",
}
// List all HelmReleases with matching labels
@@ -82,12 +83,12 @@ func (r *ApplicationDefinitionHelmReconciler) updateHelmReleasesForAppDef(ctx co
return err
}
logger.V(4).Info("Found HelmReleases to update", "appDef", appDef.Name, "kind", applicationKind, "count", len(hrList.Items))
logger.V(4).Info("Found HelmReleases to update", "crd", crd.Name, "kind", applicationKind, "count", len(hrList.Items))
// Update each HelmRelease
for i := range hrList.Items {
hr := &hrList.Items[i]
if err := r.updateHelmReleaseChart(ctx, hr, appDef); err != nil {
if err := r.updateHelmReleaseChart(ctx, hr, crd); err != nil {
logger.Error(err, "failed to update HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
continue
}
@@ -123,24 +124,24 @@ func valuesFromEqual(a, b []helmv2.ValuesReference) bool {
return true
}
// updateHelmReleaseChart updates the chart and valuesFrom in HelmRelease based on ApplicationDefinition
func (r *ApplicationDefinitionHelmReconciler) updateHelmReleaseChart(ctx context.Context, hr *helmv2.HelmRelease, appDef *cozyv1alpha1.ApplicationDefinition) error {
// updateHelmReleaseChart updates the chart and valuesFrom in HelmRelease based on CozystackResourceDefinition
func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleaseChart(ctx context.Context, hr *helmv2.HelmRelease, crd *cozyv1alpha1.CozystackResourceDefinition) error {
logger := log.FromContext(ctx)
hrCopy := hr.DeepCopy()
updated := false
// Validate ChartRef configuration exists
if appDef.Spec.Release.ChartRef == nil ||
appDef.Spec.Release.ChartRef.Kind == "" ||
appDef.Spec.Release.ChartRef.Name == "" ||
appDef.Spec.Release.ChartRef.Namespace == "" {
logger.Error(fmt.Errorf("invalid ChartRef in ApplicationDefinition"), "Skipping HelmRelease chartRef update: ChartRef is nil or incomplete",
"appDef", appDef.Name)
if crd.Spec.Release.ChartRef == nil ||
crd.Spec.Release.ChartRef.Kind == "" ||
crd.Spec.Release.ChartRef.Name == "" ||
crd.Spec.Release.ChartRef.Namespace == "" {
logger.Error(fmt.Errorf("invalid ChartRef in CRD"), "Skipping HelmRelease chartRef update: ChartRef is nil or incomplete",
"crd", crd.Name)
return nil
}
// Use ChartRef directly from ApplicationDefinition
expectedChartRef := appDef.Spec.Release.ChartRef
// Use ChartRef directly from CRD
expectedChartRef := crd.Spec.Release.ChartRef
// Check if chartRef needs to be updated
if hrCopy.Spec.ChartRef == nil {
@@ -163,22 +164,8 @@ func (r *ApplicationDefinitionHelmReconciler) updateHelmReleaseChart(ctx context
updated = true
}
// Check and update labels from ApplicationDefinition
if len(appDef.Spec.Release.Labels) > 0 {
if hrCopy.Labels == nil {
hrCopy.Labels = make(map[string]string)
}
for key, value := range appDef.Spec.Release.Labels {
if hrCopy.Labels[key] != value {
logger.V(4).Info("Updating HelmRelease label", "name", hr.Name, "namespace", hr.Namespace, "label", key, "value", value)
hrCopy.Labels[key] = value
updated = true
}
}
}
if updated {
logger.V(4).Info("Updating HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
logger.V(4).Info("Updating HelmRelease chartRef", "name", hr.Name, "namespace", hr.Namespace)
if err := r.Update(ctx, hrCopy); err != nil {
return fmt.Errorf("failed to update HelmRelease: %w", err)
}

View File

@@ -1,355 +0,0 @@
# Dashboard Resource Integration Guide
This guide explains how to add a new Kubernetes resource to the Cozystack dashboard. The dashboard provides a unified interface for viewing and managing Kubernetes resources through custom table views, detail pages, and sidebar navigation.
## Overview
Adding a new resource to the dashboard requires three main components:
1. **CustomColumnsOverride**: Defines how the resource appears in list/table views
2. **Factory**: Defines the detail page layout for individual resource instances
3. **Sidebar Entry**: Adds navigation to the resource in the sidebar menu
## Prerequisites
- The resource must have a Kubernetes CustomResourceDefinition (CRD) or be a built-in Kubernetes resource
- Know the resource's API group, version, kind, and plural name
- Understand the resource's spec structure to display relevant fields
## Step-by-Step Guide
### Step 1: Add CustomColumnsOverride
The CustomColumnsOverride defines the columns shown in the resource list table and how clicking on a row navigates to the detail page.
**Location**: `internal/controller/dashboard/static_refactored.go` in `CreateAllCustomColumnsOverrides()`
**Example**:
```go
// Stock namespace backups cozystack io v1alpha1 plans
createCustomColumnsOverride("stock-namespace-/backups.cozystack.io/v1alpha1/plans", []any{
createCustomColumnWithJsonPath("Name", ".metadata.name", "Plan", "", "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/plan-details/{reqsJsonPath[0]['.metadata.name']['-']}"),
createApplicationRefColumn("Application"),
createTimestampColumn("Created", ".metadata.creationTimestamp"),
}),
```
**Key Components**:
- **ID Format**: `stock-namespace-/{group}/{version}/{plural}`
- **Name Column**: Use `createCustomColumnWithJsonPath()` with:
- Badge value: Resource kind in PascalCase (e.g., "Plan", "Service")
- Link href: `/openapi-ui/{2}/{namespace}/factory/{resource-details}/{name}`
- **Additional Columns**: Use helper functions like:
- `createStringColumn()`: Simple string values
- `createTimestampColumn()`: Timestamp fields
- `createReadyColumn()`: Ready status from conditions
- Custom helpers for complex fields
**Helper Functions Available**:
- `createCustomColumnWithJsonPath(name, jsonPath, badgeValue, badgeColor, linkHref)`: Column with badge and link
- `createStringColumn(name, jsonPath)`: Simple string column
- `createTimestampColumn(name, jsonPath)`: Timestamp with formatting
- `createReadyColumn()`: Ready status column
- `createBoolColumn(name, jsonPath)`: Boolean column
- `createArrayColumn(name, jsonPath)`: Array column
### Step 2: Add Factory (Detail Page)
The Factory defines the detail page layout when viewing an individual resource instance.
**Location**: `internal/controller/dashboard/static_refactored.go` in `CreateAllFactories()`
**Using Unified Factory Approach** (Recommended):
```go
// Resource details factory using unified approach
resourceConfig := UnifiedResourceConfig{
Name: "resource-details", // Must match the href in CustomColumnsOverride
ResourceType: "factory",
Kind: "ResourceKind", // PascalCase
Plural: "resources", // lowercase plural
Title: "resource", // lowercase singular
}
resourceTabs := []any{
map[string]any{
"key": "details",
"label": "Details",
"children": []any{
contentCard("details-card", map[string]any{
"marginBottom": "24px",
}, []any{
antdText("details-title", true, "Resource details", map[string]any{
"fontSize": 20,
"marginBottom": "12px",
}),
spacer("details-spacer", 16),
antdRow("details-grid", []any{48, 12}, []any{
antdCol("col-left", 12, []any{
antdFlexVertical("col-left-stack", 24, []any{
// Metadata fields: Name, Namespace, Created, etc.
}),
}),
antdCol("col-right", 12, []any{
antdFlexVertical("col-right-stack", 24, []any{
// Spec fields
}),
}),
}),
}),
},
},
}
resourceSpec := createUnifiedFactory(resourceConfig, resourceTabs, []any{"/api/clusters/{2}/k8s/apis/{group}/{version}/namespaces/{3}/{plural}/{6}"})
// Add to return list
return []*dashboardv1alpha1.Factory{
// ... other factories
createFactory("resource-details", resourceSpec),
}
```
**Key Components**:
- **Factory Key**: Must match the href path segment (e.g., `plan-details` matches `/factory/plan-details/...`)
- **API Endpoint**: `/api/clusters/{2}/k8s/apis/{group}/{version}/namespaces/{3}/{plural}/{6}`
- `{2}`: Cluster name
- `{3}`: Namespace
- `{6}`: Resource name
- **Sidebar Tags**: Automatically set to `["{lowercase-kind}-sidebar"]` by `createUnifiedFactory()`
- **Tabs**: Define the detail page content (Details, YAML, etc.)
**UI Helper Functions**:
- `contentCard(id, style, children)`: Container card
- `antdText(id, strong, text, style)`: Text element
- `antdFlexVertical(id, gap, children)`: Vertical flex container
- `antdRow(id, gutter, children)`: Row layout
- `antdCol(id, span, children)`: Column layout
- `parsedText(id, text, style)`: Text with JSON path parsing
- `parsedTextWithFormatter(id, text, formatter)`: Formatted text (e.g., timestamp)
- `spacer(id, space)`: Spacing element
**Displaying Fields**:
```go
antdFlexVertical("field-block", 4, []any{
antdText("field-label", true, "Field Label", nil),
parsedText("field-value", "{reqsJsonPath[0]['.spec.fieldName']['-']}", nil),
}),
```
### Step 3: Add Sidebar Entry
The sidebar entry adds navigation to the resource in the sidebar menu.
**Location**: `internal/controller/dashboard/sidebar.go` in `ensureSidebar()`
**3a. Add to keysAndTags** (around line 110-116):
```go
// Add sidebar for {group} {kind} resource
keysAndTags["{plural}"] = []any{"{lowercase-kind}-sidebar"}
```
**3b. Add Sidebar Section** (if creating a new section, around line 169):
```go
// Add hardcoded {SectionName} section
menuItems = append(menuItems, map[string]any{
"key": "{section-key}",
"label": "{SectionName}",
"children": []any{
map[string]any{
"key": "{plural}",
"label": "{ResourceLabel}",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/{group}/{version}/{plural}",
},
},
})
```
**3c. Add Sidebar ID to targetIDs** (around line 220):
```go
"stock-project-factory-{lowercase-kind}-details",
```
**3d. Update Category Ordering** (if adding a new section):
- Update the comment around line 29 to include the new section
- Update `orderCategoryLabels()` function if needed
- Add skip condition in the category loop (around line 149)
**Important Notes**:
- The sidebar tag (`{lowercase-kind}-sidebar`) must match what the Factory uses
- The link format: `/openapi-ui/{clusterName}/{namespace}/api-table/{group}/{version}/{plural}`
- All sidebars share the same `keysAndTags` and `menuItems`, so changes affect all sidebar instances
### Step 4: Verify Integration
1. **Check Factory-Sidebar Connection**:
- Factory uses `sidebarTags: ["{lowercase-kind}-sidebar"]`
- Sidebar has `keysAndTags["{plural}"] = []any{"{lowercase-kind}-sidebar"}`
- Sidebar ID `stock-project-factory-{lowercase-kind}-details` exists in `targetIDs`
2. **Check Navigation Flow**:
- Sidebar link → List table (CustomColumnsOverride)
- List table Name column → Detail page (Factory)
- All paths use consistent naming
3. **Test**:
- Verify the resource appears in the sidebar
- Verify the list table displays correctly
- Verify clicking a resource navigates to the detail page
- Verify the detail page displays all relevant fields
## Common Patterns
### Displaying Object References
For fields that reference other resources (e.g., `applicationRef`, `storageRef`):
```go
// In CustomColumnsOverride
createApplicationRefColumn("Application"), // Uses helper function
// In Factory details tab
parsedText("application-ref-value",
"{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}",
nil),
```
### Displaying Timestamps
```go
antdFlexVertical("created-block", 4, []any{
antdText("time-label", true, "Created", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.metadata.creationTimestamp']['-']}",
},
},
}),
}),
```
### Displaying Namespace with Link
```go
antdFlexVertical("meta-namespace-block", 8, []any{
antdText("meta-namespace-label", true, "Namespace", nil),
antdFlex("header-row", 6, []any{
// Badge component
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "header-badge",
"text": "NS",
"title": "namespace",
"style": map[string]any{
"backgroundColor": "#a25792ff",
// ... badge styles
},
},
},
// Link component
map[string]any{
"type": "antdLink",
"data": map[string]any{
"id": "namespace-link",
"text": "{reqsJsonPath[0]['.metadata.namespace']['-']}",
"href": "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/marketplace",
},
},
}),
}),
```
## File Reference
- **CustomColumnsOverride**: `internal/controller/dashboard/static_refactored.go` → `CreateAllCustomColumnsOverrides()`
- **Factory**: `internal/controller/dashboard/static_refactored.go` → `CreateAllFactories()`
- **Sidebar**: `internal/controller/dashboard/sidebar.go` → `ensureSidebar()`
- **Helper Functions**: `internal/controller/dashboard/static_helpers.go`
- **UI Helpers**: `internal/controller/dashboard/ui_helpers.go`
- **Unified Helpers**: `internal/controller/dashboard/unified_helpers.go`
## AI Agent Prompt Template
Use this template when asking an AI agent to add a new resource to the dashboard:
```
Please add support for the {ResourceKind} resource ({group}/{version}/{plural}) to the Cozystack dashboard.
Resource Details:
- API Group: {group}
- Version: {version}
- Kind: {ResourceKind}
- Plural: {plural}
- Namespaced: {true/false}
Requirements:
1. Add a CustomColumnsOverride in CreateAllCustomColumnsOverrides() with:
- ID: stock-namespace-/{group}/{version}/{plural}
- Name column with {ResourceKind} badge linking to /factory/{lowercase-kind}-details/{name}
- Additional columns: {list relevant columns}
2. Add a Factory in CreateAllFactories() with:
- Key: {lowercase-kind}-details
- API endpoint: /api/clusters/{2}/k8s/apis/{group}/{version}/namespaces/{3}/{plural}/{6}
- Details tab showing all spec fields:
{list spec fields to display}
- Use createUnifiedFactory() approach
3. Add sidebar entry in ensureSidebar():
- Add keysAndTags entry: keysAndTags["{plural}"] = []any{"{lowercase-kind}-sidebar"}
- Add sidebar section: {specify section name or "add to existing section"}
- Add to targetIDs: "stock-project-factory-{lowercase-kind}-details"
4. Ensure Factory sidebarTags matches the keysAndTags entry
Please follow the existing patterns in the codebase, particularly the Plan resource implementation as a reference.
```
## Example: Plan Resource
The Plan resource (`backups.cozystack.io/v1alpha1/plans`) serves as a complete reference implementation:
- **CustomColumnsOverride**: [Diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-8309b1db3362715b3d94a8b0beae7e95d3ccaf248d4f8702aaa12fba398da895R374-R380) in `static_refactored.go`
- **Factory**: [Diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-8309b1db3362715b3d94a8b0beae7e95d3ccaf248d4f8702aaa12fba398da895R1443-R1558) in `static_refactored.go`
- **Sidebar**: [Diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-be79027f7179e457a8f10e225bb921a197ffa390eb8f916d8d21379fadd54a56) in `sidebar.go`
- **Helper Function**: `createApplicationRefColumn()` in `static_helpers.go` ([diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-f17bcccc089cac3a8e965b13b9ab26e678d45bfc9a58d842399f218703e06a08R1026-R1046))
Review [this implementation](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513) for a complete working example.
## Troubleshooting
### Resource doesn't appear in sidebar
- Check that `keysAndTags["{plural}"]` is set correctly
- Verify the sidebar section is added to `menuItems`
- Ensure the sidebar ID is in `targetIDs`
### Clicking resource doesn't navigate to detail page
- Verify the CustomColumnsOverride href matches the Factory key
- Check that the Factory key is exactly `{lowercase-kind}-details`
- Ensure the Factory is added to the return list in `CreateAllFactories()`
### Detail page shows wrong sidebar
- Verify Factory `sidebarTags` matches `keysAndTags["{plural}"]`
- Check that the sidebar ID `stock-project-factory-{lowercase-kind}-details` exists
- Ensure all sidebars are updated (they share the same `keysAndTags`)
### Fields not displaying correctly
- Verify JSON paths are correct (use `.spec.fieldName` format)
- Check that `reqsJsonPath[0]` index is used for single resource views
- Ensure field names match the actual resource spec structure
## Additional Resources
- Kubernetes API Conventions: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md
- Dashboard API Types: `api/dashboard/v1alpha1/`
- Resource Types: `api/backups/v1alpha1/` (example)

View File

@@ -14,7 +14,7 @@ import (
)
// ensureBreadcrumb creates or updates a Breadcrumb resource for the given CRD
func (m *Manager) ensureBreadcrumb(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureBreadcrumb(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
group, version, kind := pickGVK(crd)
lowerKind := strings.ToLower(kind)

View File

@@ -21,7 +21,7 @@ import (
//
// metadata.name: stock-namespace-<group>.<version>.<plural>
// spec.id: stock-namespace-/<group>/<version>/<plural>
func (m *Manager) ensureCustomColumnsOverride(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (controllerutil.OperationResult, error) {
func (m *Manager) ensureCustomColumnsOverride(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (controllerutil.OperationResult, error) {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)
// Details page segment uses lowercase kind, mirroring your example
@@ -34,6 +34,9 @@ func (m *Manager) ensureCustomColumnsOverride(ctx context.Context, crd *cozyv1al
obj.SetName(name)
href := fmt.Sprintf("/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/%s/{reqsJsonPath[0]['.metadata.name']['-']}", detailsSegment)
if g == "apps.cozystack.io" && kind == "Tenant" && plural == "tenants" {
href = "/openapi-ui/{2}/{reqsJsonPath[0]['.status.namespace']['-']}/api-table/core.cozystack.io/v1alpha1/tenantmodules"
}
desired := map[string]any{
"spec": map[string]any{

View File

@@ -15,7 +15,7 @@ import (
)
// ensureCustomFormsOverride creates or updates a CustomFormsOverride resource for the given CRD
func (m *Manager) ensureCustomFormsOverride(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureCustomFormsOverride(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)

View File

@@ -16,7 +16,7 @@ import (
)
// ensureCustomFormsPrefill creates or updates a CustomFormsPrefill resource for the given CRD
func (m *Manager) ensureCustomFormsPrefill(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (reconcile.Result, error) {
func (m *Manager) ensureCustomFormsPrefill(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (reconcile.Result, error) {
logger := log.FromContext(ctx)
app := crd.Spec.Application

View File

@@ -15,7 +15,7 @@ import (
)
// ensureFactory creates or updates a Factory resource for the given CRD
func (m *Manager) ensureFactory(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureFactory(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)
@@ -174,31 +174,6 @@ func detailsTab(kind, endpoint, schemaJSON string, keysOrder [][]string) map[str
}),
)
}
if kind == "Tenant" {
leftColStack = append(leftColStack, antdFlexVertical("tenant-external-ip-count", 4, []any{
antdText("tenant-external-ip-count-label", true, "External IPs count", nil),
parsedText("tenant-external-ip-count-value", `{reqsJsonPath[0]['.status.externalIPsCount']['0']}`, nil),
}))
rightColStack = append(rightColStack,
antdFlexVertical("resource-quotas-block", 4, []any{
antdText("resource-quotas-label", true, "Resource Quotas", map[string]any{
"fontSize": float64(20),
"marginBottom": float64(12),
}),
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "resource-quotas-table",
"baseprefix": "/openapi-ui",
"clusterNamePartOfUrl": "{2}",
"customizationId": "factory-resource-quotas",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{reqsJsonPath[0]['.status.namespace']}/resourcequotas",
"pathToItems": []any{`items`},
},
},
}),
)
}
return map[string]any{
"key": "details",
@@ -582,7 +557,7 @@ type factoryFlags struct {
// factoryFeatureFlags tries several conventional locations so you can evolve the API
// without breaking the controller. Defaults are false (hidden).
func factoryFeatureFlags(crd *cozyv1alpha1.ApplicationDefinition) factoryFlags {
func factoryFeatureFlags(crd *cozyv1alpha1.CozystackResourceDefinition) factoryFlags {
var f factoryFlags
f.Workloads = true

View File

@@ -23,7 +23,7 @@ type fieldInfo struct {
// pickGVK tries to read group/version/kind from the CRD. We prefer the "application" section,
// falling back to other likely fields if your schema differs.
func pickGVK(crd *cozyv1alpha1.ApplicationDefinition) (group, version, kind string) {
func pickGVK(crd *cozyv1alpha1.CozystackResourceDefinition) (group, version, kind string) {
// Best guess based on your examples:
if crd.Spec.Application.Kind != "" {
kind = crd.Spec.Application.Kind
@@ -41,7 +41,7 @@ func pickGVK(crd *cozyv1alpha1.ApplicationDefinition) (group, version, kind stri
}
// pickPlural prefers a field on the CRD if you have it; otherwise do a simple lowercase + "s".
func pickPlural(kind string, crd *cozyv1alpha1.ApplicationDefinition) string {
func pickPlural(kind string, crd *cozyv1alpha1.CozystackResourceDefinition) string {
// If you have crd.Spec.Application.Plural, prefer it. Example:
if crd.Spec.Application.Plural != "" {
return crd.Spec.Application.Plural

View File

@@ -41,7 +41,7 @@ func AddToScheme(s *runtime.Scheme) error {
}
// Manager owns logic for creating/updating dashboard resources derived from CRDs.
// Its easy to extend: add new ensure* methods and wire them into EnsureForAppDef.
// It's easy to extend: add new ensure* methods and wire them into EnsureForCRD.
type Manager struct {
client.Client
Scheme *runtime.Scheme
@@ -56,7 +56,7 @@ func NewManager(c client.Client, scheme *runtime.Scheme) *Manager {
func (m *Manager) SetupWithManager(mgr ctrl.Manager) error {
if err := ctrl.NewControllerManagedBy(mgr).
Named("dashboard-reconciler").
For(&cozyv1alpha1.ApplicationDefinition{}).
For(&cozyv1alpha1.CozystackResourceDefinition{}).
Complete(m); err != nil {
return err
}
@@ -72,7 +72,7 @@ func (m *Manager) SetupWithManager(mgr ctrl.Manager) error {
func (m *Manager) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
crd := &cozyv1alpha1.ApplicationDefinition{}
crd := &cozyv1alpha1.CozystackResourceDefinition{}
err := m.Get(ctx, types.NamespacedName{Name: req.Name}, crd)
if err != nil {
@@ -85,10 +85,10 @@ func (m *Manager) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result,
return ctrl.Result{}, err
}
return m.EnsureForAppDef(ctx, crd)
return m.EnsureForCRD(ctx, crd)
}
// EnsureForAppDef is the single entry-point used by the controller.
// EnsureForCRD is the single entry-point used by the controller.
// Add more ensure* calls here as you implement support for other resources:
//
// - ensureBreadcrumb (implemented)
@@ -99,7 +99,7 @@ func (m *Manager) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result,
// - ensureMarketplacePanel (implemented)
// - ensureSidebar (implemented)
// - ensureTableUriMapping (implemented)
func (m *Manager) EnsureForAppDef(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (reconcile.Result, error) {
func (m *Manager) EnsureForCRD(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (reconcile.Result, error) {
// Early return if crd.Spec.Dashboard is nil to prevent oscillation
if crd.Spec.Dashboard == nil {
return reconcile.Result{}, nil
@@ -148,7 +148,7 @@ func (m *Manager) InitializeStaticResources(ctx context.Context) error {
}
// addDashboardLabels adds standard dashboard management labels to a resource
func (m *Manager) addDashboardLabels(obj client.Object, crd *cozyv1alpha1.ApplicationDefinition, resourceType string) {
func (m *Manager) addDashboardLabels(obj client.Object, crd *cozyv1alpha1.CozystackResourceDefinition, resourceType string) {
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string)
@@ -197,7 +197,7 @@ func (m *Manager) getStaticResourceSelector() client.MatchingLabels {
// CleanupOrphanedResources removes dashboard resources that are no longer needed
// This should be called after cache warming to ensure all current resources are known
func (m *Manager) CleanupOrphanedResources(ctx context.Context) error {
var crdList cozyv1alpha1.ApplicationDefinitionList
var crdList cozyv1alpha1.CozystackResourceDefinitionList
if err := m.List(ctx, &crdList, &client.ListOptions{}); err != nil {
return err
}
@@ -228,7 +228,7 @@ func (m *Manager) CleanupOrphanedResources(ctx context.Context) error {
}
// buildExpectedResourceSet creates a map of expected resource names by type
func (m *Manager) buildExpectedResourceSet(crds []cozyv1alpha1.ApplicationDefinition) map[string]map[string]bool {
func (m *Manager) buildExpectedResourceSet(crds []cozyv1alpha1.CozystackResourceDefinition) map[string]map[string]bool {
expected := make(map[string]map[string]bool)
// Initialize maps for each resource type

View File

@@ -16,7 +16,7 @@ import (
)
// ensureMarketplacePanel creates or updates a MarketplacePanel resource for the given CRD
func (m *Manager) ensureMarketplacePanel(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (reconcile.Result, error) {
func (m *Manager) ensureMarketplacePanel(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (reconcile.Result, error) {
logger := log.FromContext(ctx)
mp := &dashv1alpha1.MarketplacePanel{}

View File

@@ -26,14 +26,14 @@ import (
// - Tenant Info (/openapi-ui/{clusterName}/{namespace}/factory/info-details/info)
// - All other sections are built from CRDs where spec.dashboard != nil.
// - Categories are ordered strictly as:
// Marketplace, IaaS, PaaS, NaaS, <others A→Z>, Resources, Backups, Administration
// Marketplace, IaaS, PaaS, NaaS, <others A→Z>, Resources, Administration
// - Items within each category: sort by Weight (desc), then Label (A→Z).
func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
// Build the full menu once.
// 1) Fetch all CRDs
var all []cozyv1alpha1.ApplicationDefinition
var crdList cozyv1alpha1.ApplicationDefinitionList
var all []cozyv1alpha1.CozystackResourceDefinition
var crdList cozyv1alpha1.CozystackResourceDefinitionList
if err := m.List(ctx, &crdList, &client.ListOptions{}); err != nil {
return err
}
@@ -111,17 +111,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
keysAndTags["services"] = []any{"service-sidebar"}
keysAndTags["secrets"] = []any{"secret-sidebar"}
keysAndTags["ingresses"] = []any{"ingress-sidebar"}
// Add sidebar for v1/services type loadbalancer
keysAndTags["loadbalancer-services"] = []any{"external-ips-sidebar"}
// Add sidebar for backups.cozystack.io Plan resource
keysAndTags["plans"] = []any{"plan-sidebar"}
// Add sidebar for backups.cozystack.io BackupJob resource
keysAndTags["backupjobs"] = []any{"backupjob-sidebar"}
// Add sidebar for backups.cozystack.io Backup resource
keysAndTags["backups"] = []any{"backup-sidebar"}
// 3) Sort items within each category by Weight (desc), then Label (A→Z)
for cat := range categories {
@@ -134,10 +123,10 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
}
// 4) Order categories strictly:
// Marketplace (hardcoded), IaaS, PaaS, NaaS, <others A→Z>, Resources, Backups (hardcoded), Administration (hardcoded)
// Marketplace (hardcoded), IaaS, PaaS, NaaS, <others A→Z>, Resources, Administration
orderedCats := orderCategoryLabels(categories)
// 5) Build menuItems (hardcode "Marketplace"; then dynamic categories; then hardcode "Backups" and "Administration")
// 5) Build menuItems (hardcode "Marketplace"; then dynamic categories; then hardcode "Administration")
menuItems := []any{
map[string]any{
"key": "marketplace",
@@ -153,8 +142,8 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
}
for _, cat := range orderedCats {
// Skip "Marketplace", "Backups", and "Administration" here since they're hardcoded
if strings.EqualFold(cat, "Marketplace") || strings.EqualFold(cat, "Backups") || strings.EqualFold(cat, "Administration") {
// Skip "Marketplace" and "Administration" here since they're hardcoded
if strings.EqualFold(cat, "Marketplace") || strings.EqualFold(cat, "Administration") {
continue
}
children := []any{}
@@ -174,29 +163,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
}
}
// Add hardcoded Backups section
menuItems = append(menuItems, map[string]any{
"key": "backups",
"label": "Backups",
"children": []any{
map[string]any{
"key": "plans",
"label": "Plans",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/plans",
},
map[string]any{
"key": "backupjobs",
"label": "BackupJobs",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backupjobs",
},
map[string]any{
"key": "backups",
"label": "Backups",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backups",
},
},
})
// Add hardcoded Administration section
menuItems = append(menuItems, map[string]any{
"key": "administration",
@@ -212,11 +178,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
"label": "Modules",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules",
},
map[string]any{
"key": "loadbalancer-services",
"label": "External IPs",
"link": "/openapi-ui/{clusterName}/{namespace}/factory/external-ips",
},
map[string]any{
"key": "tenants",
"label": "Tenants",
@@ -240,10 +201,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
"stock-project-factory-kube-service-details",
"stock-project-factory-kube-secret-details",
"stock-project-factory-kube-ingress-details",
"stock-project-factory-plan-details",
"stock-project-factory-backupjob-details",
"stock-project-factory-backup-details",
"stock-project-factory-external-ips",
"stock-project-api-form",
"stock-project-api-table",
"stock-project-builtin-form",
@@ -271,7 +228,7 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
// upsertMultipleSidebars creates/updates several Sidebar resources with the same menu spec.
func (m *Manager) upsertMultipleSidebars(
ctx context.Context,
crd *cozyv1alpha1.ApplicationDefinition,
crd *cozyv1alpha1.CozystackResourceDefinition,
ids []string,
keysAndTags map[string]any,
menuItems []any,
@@ -378,7 +335,7 @@ func orderCategoryLabels[T any](cats map[string][]T) []string {
}
// safeCategory returns spec.dashboard.category or "Resources" if not set.
func safeCategory(def *cozyv1alpha1.ApplicationDefinition) string {
func safeCategory(def *cozyv1alpha1.CozystackResourceDefinition) string {
if def == nil || def.Spec.Dashboard == nil {
return "Resources"
}

View File

@@ -1023,27 +1023,6 @@ func createReadyColumn() map[string]any {
}
}
// createApplicationRefColumn builds a table column definition that renders an
// applicationRef as "Kind.apiGroup/name" via a parsedText factory item.
// Event bubbling is disabled so clicking the cell does not trigger row links.
func createApplicationRefColumn(name string) map[string]any {
	const refText = "{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}"

	textItem := map[string]any{
		"type": "parsedText",
		"data": map[string]any{
			"id":   "application-ref-text",
			"text": refText,
		},
	}

	return map[string]any{
		"name": name,
		"type": "factory",
		"customProps": map[string]any{
			"disableEventBubbling": true,
			"items":                []any{textItem},
		},
	}
}
// createConverterBytesColumn creates a column with ConverterBytes component
func createConverterBytesColumn(name, jsonPath string) map[string]any {
return map[string]any{

View File

@@ -134,7 +134,7 @@ func CreateAllCustomColumnsOverrides() []*dashboardv1alpha1.CustomColumnsOverrid
createCustomColumnsOverride("factory-details-v1.services", []any{
createCustomColumnWithSpecificColor("Name", "Service", "", "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/kube-service-details/{reqsJsonPath[0]['.metadata.name']['-']}"),
createStringColumn("ClusterIP", ".spec.clusterIP"),
createStringColumn("LoadbalancerIP", ".status.loadBalancer.ingress[0].ip"),
createStringColumn("LoadbalancerIP", ".spec.loadBalancerIP"),
createTimestampColumn("Created", ".metadata.creationTimestamp"),
}),
@@ -189,14 +189,6 @@ func CreateAllCustomColumnsOverrides() []*dashboardv1alpha1.CustomColumnsOverrid
createStringColumn("Values", "_flatMapData_Value"),
}),
// Factory resource quotas
createCustomColumnsOverride("factory-resource-quotas", []any{
createFlatMapColumn("Data", ".spec.hard"),
createStringColumn("Resource", "_flatMapData_Key"),
createStringColumn("Hard", "_flatMapData_Value"),
createStringColumn("Used", ".status.used[_flatMapData_Key]"),
}),
// Factory ingress details rules
createCustomColumnsOverride("factory-kube-ingress-details-rules", []any{
createStringColumn("Host", ".host"),
@@ -379,30 +371,6 @@ func CreateAllCustomColumnsOverrides() []*dashboardv1alpha1.CustomColumnsOverrid
createCustomColumnWithJsonPath("Name", ".metadata.name", "TenantNamespace", "", "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.name']['-']}/factory/marketplace"),
createTimestampColumn("Created", ".metadata.creationTimestamp"),
}),
// Stock namespace backups cozystack io v1alpha1 plans
createCustomColumnsOverride("stock-namespace-/backups.cozystack.io/v1alpha1/plans", []any{
createCustomColumnWithJsonPath("Name", ".metadata.name", "Plan", "", "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/plan-details/{reqsJsonPath[0]['.metadata.name']['-']}"),
createApplicationRefColumn("Application"),
createTimestampColumn("Created", ".metadata.creationTimestamp"),
}),
// Stock namespace backups cozystack io v1alpha1 backupjobs
createCustomColumnsOverride("stock-namespace-/backups.cozystack.io/v1alpha1/backupjobs", []any{
createCustomColumnWithJsonPath("Name", ".metadata.name", "BackupJob", "", "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/backupjob-details/{reqsJsonPath[0]['.metadata.name']['-']}"),
createStringColumn("Phase", ".status.phase"),
createApplicationRefColumn("Application"),
createTimestampColumn("Created", ".metadata.creationTimestamp"),
}),
// Stock namespace backups cozystack io v1alpha1 backups
createCustomColumnsOverride("stock-namespace-/backups.cozystack.io/v1alpha1/backups", []any{
createCustomColumnWithJsonPath("Name", ".metadata.name", "Backup", "", "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/backup-details/{reqsJsonPath[0]['.metadata.name']['-']}"),
createStringColumn("Phase", ".status.phase"),
createApplicationRefColumn("Application"),
createTimestampColumn("Taken At", ".spec.takenAt"),
createTimestampColumn("Created", ".metadata.creationTimestamp"),
}),
}
}
@@ -1152,7 +1120,7 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
"clusterNamePartOfUrl": "{2}",
"customizationId": "factory-node-details-/v1/pods",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/pods",
"labelSelectorFull": map[string]any{
"labelsSelectorFull": map[string]any{
"pathToLabels": ".spec.selector",
"reqIndex": 0,
},
@@ -1465,474 +1433,6 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
}
workloadmonitorSpec := createFactorySpec("workloadmonitor-details", []any{"workloadmonitor-sidebar"}, []any{"/api/clusters/{2}/k8s/apis/cozystack.io/v1alpha1/namespaces/{3}/workloadmonitors/{6}"}, workloadmonitorHeader, workloadmonitorTabs)
// Plan details factory using unified approach
planConfig := UnifiedResourceConfig{
Name: "plan-details",
ResourceType: "factory",
Kind: "Plan",
Plural: "plans",
Title: "plan",
}
planTabs := []any{
map[string]any{
"key": "details",
"label": "Details",
"children": []any{
contentCard("details-card", map[string]any{
"marginBottom": "24px",
}, []any{
antdText("details-title", true, "Plan details", map[string]any{
"fontSize": 20,
"marginBottom": "12px",
}),
spacer("details-spacer", 16),
antdRow("details-grid", []any{48, 12}, []any{
antdCol("col-left", 12, []any{
antdFlexVertical("col-left-stack", 24, []any{
antdFlexVertical("meta-name-block", 4, []any{
antdText("meta-name-label", true, "Name", nil),
parsedText("meta-name-value", "{reqsJsonPath[0]['.metadata.name']['-']}", nil),
}),
antdFlexVertical("meta-namespace-block", 8, []any{
antdText("meta-namespace-label", true, "Namespace", nil),
antdFlex("header-row", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "header-badge",
"text": "NS",
"title": "namespace",
"style": map[string]any{
"backgroundColor": "#a25792ff",
"borderRadius": "20px",
"color": "#fff",
"display": "inline-block",
"fontFamily": "RedHatDisplay, Overpass, overpass, helvetica, arial, sans-serif",
"fontSize": "15px",
"fontWeight": 400,
"lineHeight": "24px",
"minWidth": 24,
"padding": "0 9px",
"textAlign": "center",
"whiteSpace": "nowrap",
},
},
},
map[string]any{
"type": "antdLink",
"data": map[string]any{
"id": "namespace-link",
"text": "{reqsJsonPath[0]['.metadata.namespace']['-']}",
"href": "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/marketplace",
},
},
}),
}),
antdFlexVertical("meta-created-block", 4, []any{
antdText("time-label", true, "Created", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.metadata.creationTimestamp']['-']}",
},
},
}),
}),
}),
}),
antdCol("col-right", 12, []any{
antdFlexVertical("col-right-stack", 24, []any{
antdFlexVertical("spec-application-ref-block", 4, []any{
antdText("application-ref-label", true, "Application", nil),
parsedText("application-ref-value", "{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}", nil),
}),
antdFlexVertical("spec-storage-ref-block", 4, []any{
antdText("storage-ref-label", true, "Storage", nil),
parsedText("storage-ref-value", "{reqsJsonPath[0]['.spec.storageRef.kind']['-']}.{reqsJsonPath[0]['.spec.storageRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.storageRef.name']['-']}", nil),
}),
antdFlexVertical("spec-strategy-ref-block", 4, []any{
antdText("strategy-ref-label", true, "Strategy", nil),
parsedText("strategy-ref-value", "{reqsJsonPath[0]['.spec.strategyRef.kind']['-']}.{reqsJsonPath[0]['.spec.strategyRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.strategyRef.name']['-']}", nil),
}),
antdFlexVertical("spec-schedule-type-block", 4, []any{
antdText("schedule-type-label", true, "Schedule Type", nil),
parsedText("schedule-type-value", "{reqsJsonPath[0]['.spec.schedule.type']['-']}", nil),
}),
antdFlexVertical("spec-schedule-cron-block", 4, []any{
antdText("schedule-cron-label", true, "Schedule Cron", nil),
parsedText("schedule-cron-value", "{reqsJsonPath[0]['.spec.schedule.cron']['-']}", nil),
}),
}),
}),
}),
}),
},
},
}
planSpec := createUnifiedFactory(planConfig, planTabs, []any{"/api/clusters/{2}/k8s/apis/backups.cozystack.io/v1alpha1/namespaces/{3}/plans/{6}"})
// BackupJob details factory using unified approach
backupJobConfig := UnifiedResourceConfig{
Name: "backupjob-details",
ResourceType: "factory",
Kind: "BackupJob",
Plural: "backupjobs",
Title: "backupjob",
}
backupJobTabs := []any{
map[string]any{
"key": "details",
"label": "Details",
"children": []any{
contentCard("details-card", map[string]any{
"marginBottom": "24px",
}, []any{
antdText("details-title", true, "BackupJob details", map[string]any{
"fontSize": 20,
"marginBottom": "12px",
}),
spacer("details-spacer", 16),
antdRow("details-grid", []any{48, 12}, []any{
antdCol("col-left", 12, []any{
antdFlexVertical("col-left-stack", 24, []any{
antdFlexVertical("meta-name-block", 4, []any{
antdText("meta-name-label", true, "Name", nil),
parsedText("meta-name-value", "{reqsJsonPath[0]['.metadata.name']['-']}", nil),
}),
antdFlexVertical("meta-namespace-block", 8, []any{
antdText("meta-namespace-label", true, "Namespace", nil),
antdFlex("header-row", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "header-badge",
"text": "NS",
"title": "namespace",
"style": map[string]any{
"backgroundColor": "#a25792ff",
"borderRadius": "20px",
"color": "#fff",
"display": "inline-block",
"fontFamily": "RedHatDisplay, Overpass, overpass, helvetica, arial, sans-serif",
"fontSize": "15px",
"fontWeight": 400,
"lineHeight": "24px",
"minWidth": 24,
"padding": "0 9px",
"textAlign": "center",
"whiteSpace": "nowrap",
},
},
},
map[string]any{
"type": "antdLink",
"data": map[string]any{
"id": "namespace-link",
"text": "{reqsJsonPath[0]['.metadata.namespace']['-']}",
"href": "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/marketplace",
},
},
}),
}),
antdFlexVertical("meta-created-block", 4, []any{
antdText("time-label", true, "Created", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.metadata.creationTimestamp']['-']}",
},
},
}),
}),
}),
}),
antdCol("col-right", 12, []any{
antdFlexVertical("col-right-stack", 24, []any{
antdFlexVertical("status-phase-block", 4, []any{
antdText("phase-label", true, "Phase", nil),
parsedText("phase-value", "{reqsJsonPath[0]['.status.phase']['-']}", nil),
}),
antdFlexVertical("spec-plan-ref-block", 4, []any{
antdText("plan-ref-label", true, "Plan Ref", nil),
parsedText("plan-ref-value", "{reqsJsonPath[0]['.spec.planRef.name']['-']}", nil),
}),
antdFlexVertical("spec-application-ref-block", 4, []any{
antdText("application-ref-label", true, "Application", nil),
parsedText("application-ref-value", "{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}", nil),
}),
antdFlexVertical("spec-storage-ref-block", 4, []any{
antdText("storage-ref-label", true, "Storage", nil),
parsedText("storage-ref-value", "{reqsJsonPath[0]['.spec.storageRef.kind']['-']}.{reqsJsonPath[0]['.spec.storageRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.storageRef.name']['-']}", nil),
}),
antdFlexVertical("spec-strategy-ref-block", 4, []any{
antdText("strategy-ref-label", true, "Strategy", nil),
parsedText("strategy-ref-value", "{reqsJsonPath[0]['.spec.strategyRef.name']['-']}", nil),
}),
antdFlexVertical("status-backup-ref-block", 4, []any{
antdText("backup-ref-label", true, "Backup Ref", nil),
parsedText("backup-ref-value", "{reqsJsonPath[0]['.status.backupRef.name']['-']}", nil),
}),
antdFlexVertical("status-started-at-block", 4, []any{
antdText("started-at-label", true, "Started At", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.status.startedAt']['-']}",
},
},
}),
}),
antdFlexVertical("status-completed-at-block", 4, []any{
antdText("completed-at-label", true, "Completed At", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.status.completedAt']['-']}",
},
},
}),
}),
antdFlexVertical("status-message-block", 4, []any{
antdText("message-label", true, "Message", nil),
parsedText("message-value", "{reqsJsonPath[0]['.status.message']['-']}", nil),
}),
}),
}),
}),
}),
},
},
}
backupJobSpec := createUnifiedFactory(backupJobConfig, backupJobTabs, []any{"/api/clusters/{2}/k8s/apis/backups.cozystack.io/v1alpha1/namespaces/{3}/backupjobs/{6}"})
// Backup details factory using unified approach
backupConfig := UnifiedResourceConfig{
Name: "backup-details",
ResourceType: "factory",
Kind: "Backup",
Plural: "backups",
Title: "backup",
}
backupTabs := []any{
map[string]any{
"key": "details",
"label": "Details",
"children": []any{
contentCard("details-card", map[string]any{
"marginBottom": "24px",
}, []any{
antdText("details-title", true, "Backup details", map[string]any{
"fontSize": 20,
"marginBottom": "12px",
}),
spacer("details-spacer", 16),
antdRow("details-grid", []any{48, 12}, []any{
antdCol("col-left", 12, []any{
antdFlexVertical("col-left-stack", 24, []any{
antdFlexVertical("meta-name-block", 4, []any{
antdText("meta-name-label", true, "Name", nil),
parsedText("meta-name-value", "{reqsJsonPath[0]['.metadata.name']['-']}", nil),
}),
antdFlexVertical("meta-namespace-block", 8, []any{
antdText("meta-namespace-label", true, "Namespace", nil),
antdFlex("header-row", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "header-badge",
"text": "NS",
"title": "namespace",
"style": map[string]any{
"backgroundColor": "#a25792ff",
"borderRadius": "20px",
"color": "#fff",
"display": "inline-block",
"fontFamily": "RedHatDisplay, Overpass, overpass, helvetica, arial, sans-serif",
"fontSize": "15px",
"fontWeight": 400,
"lineHeight": "24px",
"minWidth": 24,
"padding": "0 9px",
"textAlign": "center",
"whiteSpace": "nowrap",
},
},
},
map[string]any{
"type": "antdLink",
"data": map[string]any{
"id": "namespace-link",
"text": "{reqsJsonPath[0]['.metadata.namespace']['-']}",
"href": "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/marketplace",
},
},
}),
}),
antdFlexVertical("meta-created-block", 4, []any{
antdText("time-label", true, "Created", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.metadata.creationTimestamp']['-']}",
},
},
}),
}),
}),
}),
antdCol("col-right", 12, []any{
antdFlexVertical("col-right-stack", 24, []any{
antdFlexVertical("status-phase-block", 4, []any{
antdText("phase-label", true, "Phase", nil),
parsedText("phase-value", "{reqsJsonPath[0]['.status.phase']['-']}", nil),
}),
antdFlexVertical("spec-taken-at-block", 4, []any{
antdText("taken-at-label", true, "Taken At", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.spec.takenAt']['-']}",
},
},
}),
}),
antdFlexVertical("spec-plan-ref-block", 4, []any{
antdText("plan-ref-label", true, "Plan Ref", nil),
parsedText("plan-ref-value", "{reqsJsonPath[0]['.spec.planRef.name']['-']}", nil),
}),
antdFlexVertical("spec-application-ref-block", 4, []any{
antdText("application-ref-label", true, "Application", nil),
parsedText("application-ref-value", "{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}", nil),
}),
antdFlexVertical("spec-storage-ref-block", 4, []any{
antdText("storage-ref-label", true, "Storage", nil),
parsedText("storage-ref-value", "{reqsJsonPath[0]['.spec.storageRef.kind']['-']}.{reqsJsonPath[0]['.spec.storageRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.storageRef.name']['-']}", nil),
}),
antdFlexVertical("spec-strategy-ref-block", 4, []any{
antdText("strategy-ref-label", true, "Strategy", nil),
parsedText("strategy-ref-value", "{reqsJsonPath[0]['.spec.strategyRef.kind']['-']}.{reqsJsonPath[0]['.spec.strategyRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.strategyRef.name']['-']}", nil),
}),
antdFlexVertical("status-artifact-uri-block", 4, []any{
antdText("artifact-uri-label", true, "Artifact URI", nil),
parsedText("artifact-uri-value", "{reqsJsonPath[0]['.status.artifact.uri']['-']}", nil),
}),
antdFlexVertical("status-artifact-size-block", 4, []any{
antdText("artifact-size-label", true, "Artifact Size", nil),
parsedText("artifact-size-value", "{reqsJsonPath[0]['.status.artifact.sizeBytes']['-']}", nil),
}),
antdFlexVertical("status-artifact-checksum-block", 4, []any{
antdText("artifact-checksum-label", true, "Artifact Checksum", nil),
parsedText("artifact-checksum-value", "{reqsJsonPath[0]['.status.artifact.checksum']['-']}", nil),
}),
}),
}),
}),
}),
},
},
}
backupSpec := createUnifiedFactory(backupConfig, backupTabs, []any{"/api/clusters/{2}/k8s/apis/backups.cozystack.io/v1alpha1/namespaces/{3}/backups/{6}"})
// External IPs factory (filtered services)
externalIPsTabs := []any{
map[string]any{
"key": "services",
"label": "Services",
"children": []any{
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "external-ips-table",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/services",
"clusterNamePartOfUrl": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-v1.services",
"pathToItems": []any{"items"},
"fieldSelector": map[string]any{
"spec.type": "LoadBalancer",
},
},
},
},
},
}
externalIPsSpec := map[string]any{
"key": "external-ips",
"sidebarTags": []any{"external-ips-sidebar"},
"withScrollableMainContentCard": true,
"urlsToFetch": []any{},
"data": []any{
map[string]any{
"type": "antdTabs",
"data": map[string]any{
"id": "tabs-root",
"defaultActiveKey": "services",
"items": externalIPsTabs,
},
},
},
}
return []*dashboardv1alpha1.Factory{
createFactory("marketplace", marketplaceSpec),
createFactory("namespace-details", namespaceSpec),
@@ -1942,10 +1442,6 @@ func CreateAllFactories() []*dashboardv1alpha1.Factory {
createFactory("kube-service-details", serviceSpec),
createFactory("kube-ingress-details", ingressSpec),
createFactory("workloadmonitor-details", workloadmonitorSpec),
createFactory("plan-details", planSpec),
createFactory("backupjob-details", backupJobSpec),
createFactory("backup-details", backupSpec),
createFactory("external-ips", externalIPsSpec),
}
}

View File

@@ -7,7 +7,7 @@ import (
)
// ensureTableUriMapping creates or updates a TableUriMapping resource for the given CRD
func (m *Manager) ensureTableUriMapping(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureTableUriMapping(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
// Links are fully managed by the CustomColumnsOverride.
return nil
}

View File

@@ -102,22 +102,6 @@ func antdFlex(id string, gap float64, children []any) map[string]any {
}
}
// antdFlexSpaceBetween returns an antdFlex container that vertically centers
// its children and pushes them to opposite ends (justify: space-between).
// When id is empty, a container ID is auto-generated.
func antdFlexSpaceBetween(id string, children []any) map[string]any {
	flexID := id
	if flexID == "" {
		flexID = generateContainerID("auto", "flex")
	}

	data := map[string]any{
		"id":      flexID,
		"align":   "center",
		"justify": "space-between",
	}

	return map[string]any{
		"type":     "antdFlex",
		"data":     data,
		"children": children,
	}
}
func antdFlexVertical(id string, gap float64, children []any) map[string]any {
// Auto-generate ID if not provided
if id == "" {

View File

@@ -237,16 +237,9 @@ func createUnifiedFactory(config UnifiedResourceConfig, tabs []any, urlsToFetch
"lineHeight": "24px",
})
header := antdFlexSpaceBetween(generateContainerID("header", "row"), []any{
antdFlex(generateContainerID("header", "title-text"), float64(6), []any{
badge,
nameText,
}),
antdLink(generateLinkID("header", "edit"),
"Edit",
fmt.Sprintf("/openapi-ui/{2}/{3}/forms/apis/{reqsJsonPath[0]['.apiVersion']['-']}/%s/{reqsJsonPath[0]['.metadata.name']['-']}",
config.Plural),
),
header := antdFlex(generateContainerID("header", "row"), float64(6), []any{
badge,
nameText,
})
// Add marginBottom style to header

View File

@@ -1,333 +0,0 @@
package fluxplunger
import (
"context"
"fmt"
"sort"
"strconv"
"strings"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
const (
	// annotationLastProcessedVersion records the Helm release revision that was
	// last handled, so a later reconcile can detect that the broken release
	// secret has already been deleted and only the unsuspend step remains.
	annotationLastProcessedVersion = "flux-plunger.cozystack.io/last-processed-version"
	// errorMessageNoDeployedReleases is the substring of the Ready condition
	// message that identifies the Helm failure mode this controller repairs.
	errorMessageNoDeployedReleases = "has no deployed releases"
	// fieldManager is the field-manager name used for patches so they blend
	// with Flux's own client-side apply ownership.
	fieldManager = "flux-client-side-apply"
)
// FluxPlunger watches HelmRelease resources and fixes "has no deployed releases" errors
// by suspending the release, deleting its newest Helm storage secret, and
// unsuspending so Flux can retry the upgrade.
type FluxPlunger struct {
	client.Client
}
// +kubebuilder:rbac:groups=helm.toolkit.fluxcd.io,resources=helmreleases,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;delete
// Reconcile handles HelmRelease resources with "has no deployed releases" error.
//
// Recovery flow: suspend the HelmRelease, delete the newest Helm release
// secret, record the processed version in an annotation, then unsuspend so
// Flux retries. If a step fails part-way, the annotation lets a later
// reconcile detect that the secret was already deleted (latest+1 == processed)
// and finish by unsuspending only.
func (r *FluxPlunger) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)
	// Get the HelmRelease
	hr := &helmv2.HelmRelease{}
	if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// Check if HelmRelease is suspended
	if hr.Spec.Suspend {
		logger.Info("HelmRelease is suspended, checking if we need to unsuspend")
		// Get the list of Helm release secrets
		secrets, err := r.listHelmReleaseSecrets(ctx, hr.Namespace, hr.Name)
		if err != nil {
			logger.Error(err, "Failed to list Helm release secrets")
			return ctrl.Result{}, err
		}
		// If no secrets, treat latest version as 0
		latestVersion := 0
		if len(secrets) > 0 {
			latestSecret := getLatestSecret(secrets)
			latestVersion = extractVersionNumber(latestSecret.Name)
		} else {
			logger.Info("No Helm release secrets found while suspended, treating as version 0")
		}
		// Check if version is previous to just processed (latestVersion+1 == processedVersion)
		// This is the ONLY condition when we unsuspend
		shouldUnsuspend := false
		if hr.Annotations != nil {
			if processedVersionStr, exists := hr.Annotations[annotationLastProcessedVersion]; exists {
				processedVersion, err := strconv.Atoi(processedVersionStr)
				if err == nil && latestVersion+1 == processedVersion {
					shouldUnsuspend = true
				}
			}
		}
		if shouldUnsuspend {
			// Unsuspend the HelmRelease
			logger.Info("Secret was already deleted in previous run, removing suspend", "latest", latestVersion, "processed", latestVersion+1)
			if err := r.unsuspendHelmRelease(ctx, hr); err != nil {
				// Conflicts are expected (Flux also patches HelmRelease);
				// a later event will retry, so no error is returned.
				logger.Info("Could not unsuspend HelmRelease, will retry on next reconcile", "error", err.Error())
				return ctrl.Result{}, nil
			}
			return ctrl.Result{}, nil
		}
		// If not previous to processed, skip all actions
		logger.Info("HelmRelease is suspended by external process, skipping", "latest", latestVersion)
		return ctrl.Result{}, nil
	}
	// Check if HelmRelease has the specific error
	if !hasNoDeployedReleasesError(hr) {
		logger.V(1).Info("HelmRelease does not have 'has no deployed releases' error, skipping")
		return ctrl.Result{}, nil
	}
	logger.Info("Detected HelmRelease with 'has no deployed releases' error")
	// Get the list of Helm release secrets
	secrets, err := r.listHelmReleaseSecrets(ctx, hr.Namespace, hr.Name)
	if err != nil {
		logger.Error(err, "Failed to list Helm release secrets")
		return ctrl.Result{}, err
	}
	if len(secrets) == 0 {
		logger.Info("No Helm release secrets found, skipping")
		return ctrl.Result{}, nil
	}
	// Find the latest version
	latestSecret := getLatestSecret(secrets)
	latestVersion := extractVersionNumber(latestSecret.Name)
	logger.Info("Found latest Helm release version", "version", latestVersion, "secret", latestSecret.Name)
	// Check if we just processed the next version (current + 1 == processed)
	if hr.Annotations != nil {
		if processedVersionStr, exists := hr.Annotations[annotationLastProcessedVersion]; exists {
			processedVersion, err := strconv.Atoi(processedVersionStr)
			if err == nil {
				if latestVersion+1 == processedVersion {
					logger.Info("Already processed, secret was deleted previously", "latest", latestVersion, "processed", processedVersion)
					return ctrl.Result{}, nil
				}
			} else {
				// Failed to parse annotation, treat as if annotation doesn't exist
				logger.Info("Failed to parse annotation, will process", "annotation", processedVersionStr, "error", err)
			}
		}
	}
	// Suspend the HelmRelease
	logger.Info("Suspending HelmRelease")
	if err := r.suspendHelmRelease(ctx, hr); err != nil {
		// Optimistic lock conflicts are normal - FluxCD also updates HelmRelease
		// Don't return error, just log and let controller-runtime requeue on next update
		logger.Info("Could not suspend HelmRelease, will retry on next reconcile", "error", err.Error())
		return ctrl.Result{}, nil
	}
	// Delete the latest secret
	logger.Info("Deleting latest Helm release secret", "secret", latestSecret.Name)
	if err := r.Delete(ctx, &latestSecret); err != nil {
		// Deletion failure is returned as an error: the HelmRelease is now
		// suspended and a requeue must retry the delete before unsuspending.
		logger.Error(err, "Failed to delete Helm release secret")
		return ctrl.Result{}, err
	}
	// Update annotation with processed version
	logger.Info("Updating annotation with processed version", "version", latestVersion)
	if err := r.updateProcessedVersionAnnotation(ctx, hr, latestVersion); err != nil {
		logger.Info("Could not update annotation, will retry on next reconcile", "error", err.Error())
		return ctrl.Result{}, nil
	}
	// Unsuspend the HelmRelease
	logger.Info("Unsuspending HelmRelease")
	if err := r.unsuspendHelmRelease(ctx, hr); err != nil {
		logger.Info("Could not unsuspend HelmRelease, will retry on next reconcile", "error", err.Error())
		return ctrl.Result{}, nil
	}
	logger.Info("Successfully processed HelmRelease", "version", latestVersion)
	return ctrl.Result{}, nil
}
// hasNoDeployedReleasesError reports whether the HelmRelease's Ready condition
// is False with a message containing the "has no deployed releases" error.
func hasNoDeployedReleasesError(hr *helmv2.HelmRelease) bool {
	for i := range hr.Status.Conditions {
		cond := &hr.Status.Conditions[i]
		if cond.Type != "Ready" || cond.Status != metav1.ConditionFalse {
			continue
		}
		if strings.Contains(cond.Message, errorMessageNoDeployedReleases) {
			return true
		}
	}
	return false
}
// listHelmReleaseSecrets returns the Helm storage secrets (type
// "helm.sh/release.v1") labeled for the given release in the namespace.
func (r *FluxPlunger) listHelmReleaseSecrets(ctx context.Context, namespace, releaseName string) ([]corev1.Secret, error) {
	var secretList corev1.SecretList
	err := r.List(ctx, &secretList,
		client.InNamespace(namespace),
		client.MatchingLabels{"name": releaseName, "owner": "helm"},
	)
	if err != nil {
		return nil, fmt.Errorf("failed to list secrets: %w", err)
	}

	// Keep only real Helm release-storage secrets; the label selector alone
	// could match other secrets.
	releaseSecrets := []corev1.Secret{}
	for _, s := range secretList.Items {
		if s.Type == "helm.sh/release.v1" {
			releaseSecrets = append(releaseSecrets, s)
		}
	}
	return releaseSecrets, nil
}
// getLatestSecret returns the secret whose name carries the highest numeric
// revision suffix (e.g. "...v10" beats "...v9").
//
// The previous implementation sorted the caller's slice in place — a
// surprising side effect on shared data — and paid O(n log n) for what is a
// simple maximum. This version is a non-mutating O(n) scan. On version ties
// the first occurrence wins (the original's tie order was unspecified, since
// sort.Slice is not stable).
//
// Precondition (unchanged): secrets must be non-empty.
func getLatestSecret(secrets []corev1.Secret) corev1.Secret {
	latest := secrets[0]
	latestVersion := extractVersionNumber(latest.Name)
	for _, s := range secrets[1:] {
		if v := extractVersionNumber(s.Name); v > latestVersion {
			latest, latestVersion = s, v
		}
	}
	return latest
}
// extractVersionFromSecretName returns the final dot-separated segment of a
// Helm storage secret name,
// e.g. "sh.helm.release.v1.cozystack-resource-definitions.v10" -> "v10".
// A name without dots is returned unchanged.
func extractVersionFromSecretName(secretName string) string {
	if i := strings.LastIndex(secretName, "."); i >= 0 {
		return secretName[i+1:]
	}
	return secretName
}
// extractVersionNumber returns the numeric revision from a Helm storage
// secret name, e.g. "sh.helm.release.v1.cozystack-resource-definitions.v10"
// -> 10. Returns 0 when the final segment cannot be parsed as "v<int>".
func extractVersionNumber(secretName string) int {
	// Take the last dot-separated segment, then strip an optional 'v' prefix.
	segment := secretName
	if i := strings.LastIndex(secretName, "."); i >= 0 {
		segment = secretName[i+1:]
	}
	n, err := strconv.Atoi(strings.TrimPrefix(segment, "v"))
	if err != nil {
		return 0
	}
	return n
}
// suspendHelmRelease sets spec.suspend=true on the HelmRelease. The
// object is re-fetched first so the patch is computed against the
// cluster's latest state; a release that is already suspended is left
// untouched to avoid a no-op patch.
func (r *FluxPlunger) suspendHelmRelease(ctx context.Context, hr *helmv2.HelmRelease) error {
	latest := &helmv2.HelmRelease{}
	if err := r.Get(ctx, types.NamespacedName{Namespace: hr.Namespace, Name: hr.Name}, latest); err != nil {
		return fmt.Errorf("failed to get latest HelmRelease: %w", err)
	}
	if latest.Spec.Suspend {
		return nil
	}
	// Snapshot the pre-mutation state; the merge patch (with optimistic
	// lock) is derived from the diff against this base.
	base := latest.DeepCopy()
	latest.Spec.Suspend = true
	patch := client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})
	return r.Patch(ctx, latest, patch, client.FieldOwner(fieldManager))
}
// unsuspendHelmRelease sets spec.suspend=false on the HelmRelease. The
// object is re-fetched first so the patch is computed against the
// cluster's latest state; a release that is not suspended is left
// untouched to avoid a no-op patch.
func (r *FluxPlunger) unsuspendHelmRelease(ctx context.Context, hr *helmv2.HelmRelease) error {
	latest := &helmv2.HelmRelease{}
	if err := r.Get(ctx, types.NamespacedName{Namespace: hr.Namespace, Name: hr.Name}, latest); err != nil {
		return fmt.Errorf("failed to get latest HelmRelease: %w", err)
	}
	if !latest.Spec.Suspend {
		return nil
	}
	// Snapshot the pre-mutation state; the merge patch (with optimistic
	// lock) is derived from the diff against this base.
	base := latest.DeepCopy()
	latest.Spec.Suspend = false
	patch := client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})
	return r.Patch(ctx, latest, patch, client.FieldOwner(fieldManager))
}
// updateProcessedVersionAnnotation records the processed Helm release
// version on the HelmRelease via the last-processed-version annotation.
// The object is re-fetched first so the patch (merge with optimistic
// lock) is computed against the cluster's latest state.
func (r *FluxPlunger) updateProcessedVersionAnnotation(ctx context.Context, hr *helmv2.HelmRelease, version int) error {
	latest := &helmv2.HelmRelease{}
	if err := r.Get(ctx, types.NamespacedName{Namespace: hr.Namespace, Name: hr.Name}, latest); err != nil {
		return fmt.Errorf("failed to get latest HelmRelease: %w", err)
	}
	base := latest.DeepCopy()
	if latest.Annotations == nil {
		latest.Annotations = map[string]string{}
	}
	latest.Annotations[annotationLastProcessedVersion] = strconv.Itoa(version)
	patch := client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})
	return r.Patch(ctx, latest, patch, client.FieldOwner(fieldManager))
}
// SetupWithManager registers the FluxPlunger controller with the
// manager. The event filter admits HelmReleases that either carry the
// "no deployed releases" error, or are suspended and marked with our
// last-processed-version annotation — the latter lets a restarted
// controller resume work it began before crashing.
func (r *FluxPlunger) SetupWithManager(mgr ctrl.Manager) error {
	pred := predicate.NewPredicateFuncs(func(obj client.Object) bool {
		hr, ok := obj.(*helmv2.HelmRelease)
		switch {
		case !ok:
			return false
		case hasNoDeployedReleasesError(hr):
			// The error this controller exists to remediate is present.
			return true
		case hr.Spec.Suspend:
			// Crash recovery: we suspended it earlier and left our marker.
			// (Comma-ok indexing is safe even on a nil annotations map.)
			_, marked := hr.Annotations[annotationLastProcessedVersion]
			return marked
		default:
			return false
		}
	})
	return ctrl.NewControllerManagedBy(mgr).
		Named("fluxplunger").
		For(&helmv2.HelmRelease{}).
		WithEventFilter(pred).
		Complete(r)
}

View File

@@ -467,8 +467,5 @@ func (r *WorkloadMonitorReconciler) getWorkloadMetadata(obj client.Object) map[s
if instanceType, ok := annotations["kubevirt.io/cluster-instancetype-name"]; ok {
labels["workloads.cozystack.io/kubevirt-vmi-instance-type"] = instanceType
}
if instanceProfile, ok := annotations["kubevirt.io/cluster-instanceprofile-name"]; ok {
labels["workloads.cozystack.io/kubevirt-vmi-instance-profile"] = instanceProfile
}
return labels
}

View File

@@ -56,31 +56,26 @@ func Install(ctx context.Context, k8sClient client.Client, writeEmbeddedManifest
return fmt.Errorf("failed to extract embedded manifests: %w", err)
}
// Find all YAML manifest files
entries, err := os.ReadDir(manifestsDir)
if err != nil {
return fmt.Errorf("failed to read manifests directory: %w", err)
}
var manifestFiles []string
for _, entry := range entries {
if strings.HasSuffix(entry.Name(), ".yaml") {
manifestFiles = append(manifestFiles, filepath.Join(manifestsDir, entry.Name()))
}
}
if len(manifestFiles) == 0 {
return fmt.Errorf("no YAML manifest files found in directory")
}
// Parse all manifest files
var objects []*unstructured.Unstructured
for _, manifestPath := range manifestFiles {
objs, err := parseManifests(manifestPath)
// Find the manifest file (should be fluxcd.yaml from cozypkg)
manifestPath := filepath.Join(manifestsDir, "fluxcd.yaml")
if _, err := os.Stat(manifestPath); err != nil {
// Try to find any YAML file if fluxcd.yaml doesn't exist
entries, err := os.ReadDir(manifestsDir)
if err != nil {
return fmt.Errorf("failed to parse manifests from %s: %w", manifestPath, err)
return fmt.Errorf("failed to read manifests directory: %w", err)
}
objects = append(objects, objs...)
for _, entry := range entries {
if strings.HasSuffix(entry.Name(), ".yaml") {
manifestPath = filepath.Join(manifestsDir, entry.Name())
break
}
}
}
// Parse and apply manifests
objects, err := parseManifests(manifestPath)
if err != nil {
return fmt.Errorf("failed to parse manifests: %w", err)
}
if len(objects) == 0 {
@@ -101,7 +96,7 @@ func Install(ctx context.Context, k8sClient client.Client, writeEmbeddedManifest
logger.Info("Installing Flux components", "namespace", namespace)
// Apply manifests using server-side apply
logger.Info("Applying Flux manifests", "count", len(objects), "files", len(manifestFiles), "namespace", namespace)
logger.Info("Applying Flux manifests", "count", len(objects), "manifest", manifestPath, "namespace", namespace)
if err := applyManifests(ctx, k8sClient, objects); err != nil {
return fmt.Errorf("failed to apply manifests: %w", err)
}
@@ -256,17 +251,11 @@ func injectKubernetesServiceEnv(objects []*unstructured.Unstructured) error {
continue
}
// Navigate to spec.template.spec
// Navigate to spec.template.spec.containers
spec, found, err := unstructured.NestedMap(obj.Object, "spec", "template", "spec")
if !found {
continue
}
// Skip pods that don't use hostNetwork - they should use normal Kubernetes DNS
hostNetwork, _, _ := unstructured.NestedBool(spec, "hostNetwork")
if !hostNetwork {
continue
}
if err != nil {
if firstErr == nil {
firstErr = fmt.Errorf("failed to get spec for %s/%s: %w", kind, obj.GetName(), err)

View File

@@ -1,18 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: flux
app.kubernetes.io/part-of: flux
name: flux
namespace: cozy-fluxcd
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: http-sc
selector:
app.kubernetes.io/name: flux
type: ClusterIP

View File

@@ -1,102 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/name: flux-tenants
app.kubernetes.io/part-of: flux
app.kubernetes.io/version: v2.7.3
sharding.fluxcd.io/role: shard
name: flux-tenants
namespace: cozy-fluxcd
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: flux-tenants
strategy:
type: Recreate
template:
metadata:
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: flux-tenants
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
containers:
- args:
- --watch-all-namespaces
- --log-level=info
- --log-encoding=json
- --enable-leader-election=false
- --metrics-addr=:9795
- --health-addr=:9796
- --watch-label-selector=sharding.fluxcd.io/key=tenants
- --concurrent=5
- --requeue-dependency=30s
- --feature-gates=ExternalArtifact=true
env:
- name: RUNTIME_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
- name: TUF_ROOT
value: /tmp/.sigstore
image: "ghcr.io/fluxcd/helm-controller:v1.4.3"
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: healthz
name: helm-controller
ports:
- containerPort: 9795
name: http-prom
protocol: TCP
- containerPort: 9796
name: healthz
protocol: TCP
readinessProbe:
httpGet:
path: /readyz
port: healthz
resources:
limits:
memory: 1Gi
requests:
cpu: 100m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /tmp
name: tmp
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1337
serviceAccountName: flux
terminationGracePeriodSeconds: 60
volumes:
- emptyDir: {}
name: tmp

View File

@@ -11871,7 +11871,7 @@ spec:
- --health-addr=:9693
- --storage-addr=:9691
- --storage-path=/data
- --storage-adv-addr=flux.$(RUNTIME_NAMESPACE).svc
- --storage-adv-addr=source-watcher.$(RUNTIME_NAMESPACE).svc
- --events-addr=http://localhost:9690
env:
- name: SOURCE_CONTROLLER_LOCALHOST
@@ -11940,12 +11940,10 @@ spec:
tolerations:
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node.kubernetes.io/unreachable
operator: Exists
- key: node.cilium.io/agent-not-ready
operator: Exists
- key: node.cloudprovider.kubernetes.io/uninitialized
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- emptyDir: {}
name: data

View File

@@ -14,14 +14,14 @@ type appRef struct {
}
type runtimeConfig struct {
appCRDMap map[appRef]*cozyv1alpha1.ApplicationDefinition
appCRDMap map[appRef]*cozyv1alpha1.CozystackResourceDefinition
}
func (l *LineageControllerWebhook) initConfig() {
l.initOnce.Do(func() {
if l.config.Load() == nil {
l.config.Store(&runtimeConfig{
appCRDMap: make(map[appRef]*cozyv1alpha1.ApplicationDefinition),
appCRDMap: make(map[appRef]*cozyv1alpha1.CozystackResourceDefinition),
})
}
})

View File

@@ -8,23 +8,23 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log"
)
// +kubebuilder:rbac:groups=cozystack.io,resources=applicationdefinitions,verbs=list;watch;get
// +kubebuilder:rbac:groups=cozystack.io,resources=cozystackresourcedefinitions,verbs=list;watch;get
func (c *LineageControllerWebhook) SetupWithManagerAsController(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&cozyv1alpha1.ApplicationDefinition{}).
For(&cozyv1alpha1.CozystackResourceDefinition{}).
Complete(c)
}
func (c *LineageControllerWebhook) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
crds := &cozyv1alpha1.ApplicationDefinitionList{}
crds := &cozyv1alpha1.CozystackResourceDefinitionList{}
if err := c.List(ctx, crds); err != nil {
l.Error(err, "failed reading ApplicationDefinitions")
l.Error(err, "failed reading CozystackResourceDefinitions")
return ctrl.Result{}, err
}
cfg := &runtimeConfig{
appCRDMap: make(map[appRef]*cozyv1alpha1.ApplicationDefinition),
appCRDMap: make(map[appRef]*cozyv1alpha1.CozystackResourceDefinition),
}
for _, crd := range crds.Items {
appRef := appRef{

View File

@@ -42,7 +42,7 @@ func matchName(ctx context.Context, name string, templateContext map[string]stri
return false
}
func matchResourceToSelector(ctx context.Context, name string, templateContext, l map[string]string, s *cozyv1alpha1.ApplicationDefinitionResourceSelector) bool {
func matchResourceToSelector(ctx context.Context, name string, templateContext, l map[string]string, s *cozyv1alpha1.CozystackResourceDefinitionResourceSelector) bool {
sel, err := metav1.LabelSelectorAsSelector(&s.LabelSelector)
if err != nil {
log.FromContext(ctx).Error(err, "failed to convert label selector to selector")
@@ -53,7 +53,7 @@ func matchResourceToSelector(ctx context.Context, name string, templateContext,
return labelMatches && nameMatches
}
func matchResourceToSelectorArray(ctx context.Context, name string, templateContext, l map[string]string, ss []*cozyv1alpha1.ApplicationDefinitionResourceSelector) bool {
func matchResourceToSelectorArray(ctx context.Context, name string, templateContext, l map[string]string, ss []*cozyv1alpha1.CozystackResourceDefinitionResourceSelector) bool {
for _, s := range ss {
if matchResourceToSelector(ctx, name, templateContext, l, s) {
return true
@@ -62,7 +62,7 @@ func matchResourceToSelectorArray(ctx context.Context, name string, templateCont
return false
}
func matchResourceToExcludeInclude(ctx context.Context, name string, templateContext, l map[string]string, resources *cozyv1alpha1.ApplicationDefinitionResources) bool {
func matchResourceToExcludeInclude(ctx context.Context, name string, templateContext, l map[string]string, resources *cozyv1alpha1.CozystackResourceDefinitionResources) bool {
if resources == nil {
return false
}

View File

@@ -33,8 +33,8 @@ const (
ManagerNameKey = "apps.cozystack.io/application.name"
)
// getResourceSelectors returns the appropriate ApplicationDefinitionResources for a given GroupKind
func (h *LineageControllerWebhook) getResourceSelectors(gk schema.GroupKind, crd *cozyv1alpha1.ApplicationDefinition) *cozyv1alpha1.ApplicationDefinitionResources {
// getResourceSelectors returns the appropriate CozystackResourceDefinitionResources for a given GroupKind
func (h *LineageControllerWebhook) getResourceSelectors(gk schema.GroupKind, crd *cozyv1alpha1.CozystackResourceDefinition) *cozyv1alpha1.CozystackResourceDefinitionResources {
switch {
case gk.Group == "" && gk.Kind == "Secret":
return &crd.Spec.Secrets

View File

@@ -211,13 +211,11 @@ func (r *PackageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
Namespace: "cozy-system",
},
Install: &helmv2.Install{
Timeout: &metav1.Duration{Duration: 10 * 60 * 1000000000}, // 10m
Remediation: &helmv2.InstallRemediation{
Retries: -1,
},
},
Upgrade: &helmv2.Upgrade{
Timeout: &metav1.Duration{Duration: 10 * 60 * 1000000000}, // 10m
Remediation: &helmv2.UpgradeRemediation{
Retries: -1,
},

View File

@@ -11,13 +11,13 @@ import (
type Memory struct {
mu sync.RWMutex
data map[string]cozyv1alpha1.ApplicationDefinition
data map[string]cozyv1alpha1.CozystackResourceDefinition
primed bool
primeOnce sync.Once
}
func New() *Memory {
return &Memory{data: make(map[string]cozyv1alpha1.ApplicationDefinition)}
return &Memory{data: make(map[string]cozyv1alpha1.CozystackResourceDefinition)}
}
var (
@@ -30,7 +30,7 @@ func Global() *Memory {
return global
}
func (m *Memory) Upsert(obj *cozyv1alpha1.ApplicationDefinition) {
func (m *Memory) Upsert(obj *cozyv1alpha1.CozystackResourceDefinition) {
if obj == nil {
return
}
@@ -45,10 +45,10 @@ func (m *Memory) Delete(name string) {
m.mu.Unlock()
}
func (m *Memory) Snapshot() []cozyv1alpha1.ApplicationDefinition {
func (m *Memory) Snapshot() []cozyv1alpha1.CozystackResourceDefinition {
m.mu.RLock()
defer m.mu.RUnlock()
out := make([]cozyv1alpha1.ApplicationDefinition, 0, len(m.data))
out := make([]cozyv1alpha1.CozystackResourceDefinition, 0, len(m.data))
for _, v := range m.data {
out = append(out, v)
}
@@ -72,7 +72,7 @@ func (m *Memory) EnsurePrimingWithManager(mgr ctrl.Manager) error {
if ok := mgr.GetCache().WaitForCacheSync(ctx); !ok {
return nil
}
var list cozyv1alpha1.ApplicationDefinitionList
var list cozyv1alpha1.CozystackResourceDefinitionList
if err := mgr.GetClient().List(ctx, &list); err == nil {
for i := range list.Items {
m.Upsert(&list.Items[i])
@@ -87,11 +87,11 @@ func (m *Memory) EnsurePrimingWithManager(mgr ctrl.Manager) error {
return errOut
}
func (m *Memory) ListFromCacheOrAPI(ctx context.Context, c client.Client) ([]cozyv1alpha1.ApplicationDefinition, error) {
func (m *Memory) ListFromCacheOrAPI(ctx context.Context, c client.Client) ([]cozyv1alpha1.CozystackResourceDefinition, error) {
if m.IsPrimed() {
return m.Snapshot(), nil
}
var list cozyv1alpha1.ApplicationDefinitionList
var list cozyv1alpha1.CozystackResourceDefinitionList
if err := c.List(ctx, &list); err != nil {
return nil, err
}

View File

@@ -9,34 +9,35 @@ import (
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)
const (
// ApplicationKindLabel is the label used to identify application kind on HelmReleases
ApplicationKindLabel = "apps.cozystack.io/application.kind"
)
// Collector handles telemetry data collection for cozystack-controller
// Collector handles telemetry data collection and sending
type Collector struct {
client client.Client
config *Config
ticker *time.Ticker
stopCh chan struct{}
client client.Client
discoveryClient discovery.DiscoveryInterface
config *Config
ticker *time.Ticker
stopCh chan struct{}
}
// NewCollector creates a new telemetry collector for cozystack-controller
func NewCollector(c client.Client, config *Config, _ *rest.Config) (*Collector, error) {
// NewCollector creates a new telemetry collector
func NewCollector(client client.Client, config *Config, kubeConfig *rest.Config) (*Collector, error) {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
if err != nil {
return nil, fmt.Errorf("failed to create discovery client: %w", err)
}
return &Collector{
client: c,
config: config,
client: client,
discoveryClient: discoveryClient,
config: config,
}, nil
}
@@ -66,9 +67,46 @@ func (c *Collector) Start(ctx context.Context) error {
// NeedLeaderElection implements manager.LeaderElectionRunnable
func (c *Collector) NeedLeaderElection() bool {
// Only run telemetry collector on the leader
return true
}
// Stop halts telemetry collection
func (c *Collector) Stop() {
close(c.stopCh)
}
// getSizeGroup returns the exponential size group for PVC
func getSizeGroup(size resource.Quantity) string {
gb := size.Value() / (1024 * 1024 * 1024)
switch {
case gb <= 1:
return "1Gi"
case gb <= 5:
return "5Gi"
case gb <= 10:
return "10Gi"
case gb <= 25:
return "25Gi"
case gb <= 50:
return "50Gi"
case gb <= 100:
return "100Gi"
case gb <= 250:
return "250Gi"
case gb <= 500:
return "500Gi"
case gb <= 1024:
return "1Ti"
case gb <= 2048:
return "2Ti"
case gb <= 5120:
return "5Ti"
default:
return "10Ti"
}
}
// collect gathers and sends telemetry data
func (c *Collector) collect(ctx context.Context) {
logger := log.FromContext(ctx).V(1)
@@ -82,54 +120,151 @@ func (c *Collector) collect(ctx context.Context) {
clusterID := string(kubeSystemNS.UID)
// Get all ApplicationDefinitions to know which kinds exist
var appDefList cozyv1alpha1.ApplicationDefinitionList
if err := c.client.List(ctx, &appDefList); err != nil {
logger.Info(fmt.Sprintf("Failed to list ApplicationDefinitions: %v", err))
var cozystackCM corev1.ConfigMap
if err := c.client.Get(ctx, types.NamespacedName{Namespace: "cozy-system", Name: "cozystack"}, &cozystackCM); err != nil {
logger.Info(fmt.Sprintf("Failed to get cozystack configmap in cozy-system namespace: %v", err))
return
}
// Build a map of all known application kinds (initialized with 0)
appKindCounts := make(map[string]int)
for _, appDef := range appDefList.Items {
kind := appDef.Spec.Application.Kind
if kind != "" {
appKindCounts[kind] = 0
}
}
oidcEnabled := cozystackCM.Data["oidc-enabled"]
bundle := cozystackCM.Data["bundle-name"]
bundleEnable := cozystackCM.Data["bundle-enable"]
bundleDisable := cozystackCM.Data["bundle-disable"]
// Get all HelmReleases with apps.cozystack.io/application.kind label in one request
var hrList helmv2.HelmReleaseList
if err := c.client.List(ctx, &hrList, client.HasLabels{ApplicationKindLabel}); err != nil {
logger.Info(fmt.Sprintf("Failed to list HelmReleases: %v", err))
// Get Kubernetes version from nodes
var nodeList corev1.NodeList
if err := c.client.List(ctx, &nodeList); err != nil {
logger.Info(fmt.Sprintf("Failed to list nodes: %v", err))
return
}
// Count HelmReleases by application kind
for _, hr := range hrList.Items {
kind := hr.Labels[ApplicationKindLabel]
if kind != "" {
appKindCounts[kind]++
}
}
// Create metrics buffer
var metrics strings.Builder
// Write application count metrics
for kind, count := range appKindCounts {
// Add Cozystack info metric
if len(nodeList.Items) > 0 {
k8sVersion, _ := c.discoveryClient.ServerVersion()
metrics.WriteString(fmt.Sprintf(
"cozy_application_count{kind=\"%s\"} %d\n",
kind,
"cozy_cluster_info{cozystack_version=\"%s\",kubernetes_version=\"%s\",oidc_enabled=\"%s\",bundle_name=\"%s\",bunde_enable=\"%s\",bunde_disable=\"%s\"} 1\n",
c.config.CozystackVersion,
k8sVersion,
oidcEnabled,
bundle,
bundleEnable,
bundleDisable,
))
}
// Collect node metrics
nodeOSCount := make(map[string]int)
for _, node := range nodeList.Items {
key := fmt.Sprintf("%s (%s)", node.Status.NodeInfo.OperatingSystem, node.Status.NodeInfo.OSImage)
nodeOSCount[key] = nodeOSCount[key] + 1
}
for osKey, count := range nodeOSCount {
metrics.WriteString(fmt.Sprintf(
"cozy_nodes_count{os=\"%s\",kernel=\"%s\"} %d\n",
osKey,
nodeList.Items[0].Status.NodeInfo.KernelVersion,
count,
))
}
// Send metrics only if there's something to send
if metrics.Len() > 0 {
if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
// Collect LoadBalancer services metrics
var serviceList corev1.ServiceList
if err := c.client.List(ctx, &serviceList); err != nil {
logger.Info(fmt.Sprintf("Failed to list Services: %v", err))
} else {
lbCount := 0
for _, svc := range serviceList.Items {
if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
lbCount++
}
}
metrics.WriteString(fmt.Sprintf("cozy_loadbalancers_count %d\n", lbCount))
}
// Count tenant namespaces
var nsList corev1.NamespaceList
if err := c.client.List(ctx, &nsList); err != nil {
logger.Info(fmt.Sprintf("Failed to list Namespaces: %v", err))
} else {
tenantCount := 0
for _, ns := range nsList.Items {
if strings.HasPrefix(ns.Name, "tenant-") {
tenantCount++
}
}
metrics.WriteString(fmt.Sprintf("cozy_tenants_count %d\n", tenantCount))
}
// Collect PV metrics grouped by driver and size
var pvList corev1.PersistentVolumeList
if err := c.client.List(ctx, &pvList); err != nil {
logger.Info(fmt.Sprintf("Failed to list PVs: %v", err))
} else {
// Map to store counts by size and driver
pvMetrics := make(map[string]map[string]int)
for _, pv := range pvList.Items {
if capacity, ok := pv.Spec.Capacity[corev1.ResourceStorage]; ok {
sizeGroup := getSizeGroup(capacity)
// Get the CSI driver name
driver := "unknown"
if pv.Spec.CSI != nil {
driver = pv.Spec.CSI.Driver
} else if pv.Spec.HostPath != nil {
driver = "hostpath"
} else if pv.Spec.NFS != nil {
driver = "nfs"
}
// Initialize nested map if needed
if _, exists := pvMetrics[sizeGroup]; !exists {
pvMetrics[sizeGroup] = make(map[string]int)
}
// Increment count for this size/driver combination
pvMetrics[sizeGroup][driver]++
}
}
// Write metrics
for size, drivers := range pvMetrics {
for driver, count := range drivers {
metrics.WriteString(fmt.Sprintf(
"cozy_pvs_count{driver=\"%s\",size=\"%s\"} %d\n",
driver,
size,
count,
))
}
}
}
// Collect workload metrics
var monitorList cozyv1alpha1.WorkloadMonitorList
if err := c.client.List(ctx, &monitorList); err != nil {
logger.Info(fmt.Sprintf("Failed to list WorkloadMonitors: %v", err))
return
}
for _, monitor := range monitorList.Items {
metrics.WriteString(fmt.Sprintf(
"cozy_workloads_count{uid=\"%s\",kind=\"%s\",type=\"%s\",version=\"%s\"} %d\n",
monitor.UID,
monitor.Spec.Kind,
monitor.Spec.Type,
monitor.Spec.Version,
monitor.Status.ObservedReplicas,
))
}
// Send metrics
if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
}
}

View File

@@ -12,13 +12,16 @@ type Config struct {
Endpoint string
// Interval between telemetry data collection
Interval time.Duration
// CozystackVersion represents the current version of Cozystack
CozystackVersion string
}
// DefaultConfig returns default telemetry configuration
func DefaultConfig() *Config {
return &Config{
Disabled: false,
Endpoint: "https://telemetry.cozystack.io",
Interval: 15 * time.Minute,
Disabled: false,
Endpoint: "https://telemetry.cozystack.io",
Interval: 15 * time.Minute,
CozystackVersion: "unknown",
}
}

View File

@@ -1,282 +0,0 @@
package telemetry
import (
"bytes"
"context"
"fmt"
"net/http"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
"github.com/cozystack/cozystack/pkg/version"
)
// OperatorCollector handles telemetry data collection for cozystack-operator
type OperatorCollector struct {
reader client.Reader
discoveryClient discovery.DiscoveryInterface
config *Config
ticker *time.Ticker
stopCh chan struct{}
}
// NewOperatorCollector creates a new telemetry collector for cozystack-operator
func NewOperatorCollector(r client.Reader, config *Config, kubeConfig *rest.Config) (*OperatorCollector, error) {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
if err != nil {
return nil, fmt.Errorf("failed to create discovery client: %w", err)
}
return &OperatorCollector{
reader: r,
discoveryClient: discoveryClient,
config: config,
}, nil
}
// Start implements manager.Runnable
func (c *OperatorCollector) Start(ctx context.Context) error {
if c.config.Disabled {
return nil
}
c.ticker = time.NewTicker(c.config.Interval)
c.stopCh = make(chan struct{})
// Initial collection
c.collect(ctx)
for {
select {
case <-ctx.Done():
c.ticker.Stop()
close(c.stopCh)
return nil
case <-c.ticker.C:
c.collect(ctx)
}
}
}
// NeedLeaderElection implements manager.LeaderElectionRunnable
func (c *OperatorCollector) NeedLeaderElection() bool {
return true
}
// getSizeGroup returns the exponential size group for PVC
func getSizeGroup(size resource.Quantity) string {
gb := size.Value() / (1024 * 1024 * 1024)
switch {
case gb <= 1:
return "1Gi"
case gb <= 5:
return "5Gi"
case gb <= 10:
return "10Gi"
case gb <= 25:
return "25Gi"
case gb <= 50:
return "50Gi"
case gb <= 100:
return "100Gi"
case gb <= 250:
return "250Gi"
case gb <= 500:
return "500Gi"
case gb <= 1024:
return "1Ti"
case gb <= 2048:
return "2Ti"
case gb <= 5120:
return "5Ti"
default:
return "10Ti"
}
}
// collect gathers and sends telemetry data
func (c *OperatorCollector) collect(ctx context.Context) {
logger := log.FromContext(ctx).V(1)
// Get cluster ID from kube-system namespace
var kubeSystemNS corev1.Namespace
if err := c.reader.Get(ctx, types.NamespacedName{Name: "kube-system"}, &kubeSystemNS); err != nil {
logger.Info(fmt.Sprintf("Failed to get kube-system namespace: %v", err))
return
}
clusterID := string(kubeSystemNS.UID)
// Get Kubernetes version
k8sVersion, err := c.discoveryClient.ServerVersion()
if err != nil {
logger.Info(fmt.Sprintf("Failed to get Kubernetes version: %v", err))
return
}
// Get nodes
var nodeList corev1.NodeList
if err := c.reader.List(ctx, &nodeList); err != nil {
logger.Info(fmt.Sprintf("Failed to list nodes: %v", err))
return
}
// Create metrics buffer
var metrics strings.Builder
// Add cluster info metric
metrics.WriteString(fmt.Sprintf(
"cozy_cluster_info{cozystack_version=\"%s\",kubernetes_version=\"%s\"} 1\n",
version.Version,
k8sVersion.GitVersion,
))
// Collect node metrics grouped by OS and kernel
nodeOSCount := make(map[string]map[string]int) // os -> kernel -> count
for _, node := range nodeList.Items {
osKey := fmt.Sprintf("%s (%s)", node.Status.NodeInfo.OperatingSystem, node.Status.NodeInfo.OSImage)
kernelKey := node.Status.NodeInfo.KernelVersion
if _, exists := nodeOSCount[osKey]; !exists {
nodeOSCount[osKey] = make(map[string]int)
}
nodeOSCount[osKey][kernelKey]++
}
for osKey, kernels := range nodeOSCount {
for kernel, count := range kernels {
metrics.WriteString(fmt.Sprintf(
"cozy_nodes_count{os=\"%s\",kernel=\"%s\"} %d\n",
osKey,
kernel,
count,
))
}
}
// Collect cluster capacity metrics (cpu, memory, gpu)
capacityTotals := make(map[string]int64)
for _, node := range nodeList.Items {
for resourceName, quantity := range node.Status.Capacity {
name := string(resourceName)
if name == "cpu" || name == "memory" || strings.HasPrefix(name, "nvidia.com/") {
capacityTotals[name] += quantity.Value()
}
}
}
for resourceName, total := range capacityTotals {
metrics.WriteString(fmt.Sprintf(
"cozy_cluster_capacity{resource=\"%s\"} %d\n",
resourceName,
total,
))
}
// Collect LoadBalancer services metrics
var serviceList corev1.ServiceList
if err := c.reader.List(ctx, &serviceList); err != nil {
logger.Info(fmt.Sprintf("Failed to list Services: %v", err))
} else {
lbCount := 0
for _, svc := range serviceList.Items {
if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
lbCount++
}
}
metrics.WriteString(fmt.Sprintf("cozy_loadbalancers_count %d\n", lbCount))
}
// Collect PV metrics grouped by driver and size
var pvList corev1.PersistentVolumeList
if err := c.reader.List(ctx, &pvList); err != nil {
logger.Info(fmt.Sprintf("Failed to list PVs: %v", err))
} else {
pvMetrics := make(map[string]map[string]int) // size -> driver -> count
for _, pv := range pvList.Items {
if capacity, ok := pv.Spec.Capacity[corev1.ResourceStorage]; ok {
sizeGroup := getSizeGroup(capacity)
driver := "unknown"
if pv.Spec.CSI != nil {
driver = pv.Spec.CSI.Driver
} else if pv.Spec.HostPath != nil {
driver = "hostpath"
} else if pv.Spec.NFS != nil {
driver = "nfs"
}
if _, exists := pvMetrics[sizeGroup]; !exists {
pvMetrics[sizeGroup] = make(map[string]int)
}
pvMetrics[sizeGroup][driver]++
}
}
for size, drivers := range pvMetrics {
for driver, count := range drivers {
metrics.WriteString(fmt.Sprintf(
"cozy_pvs_count{driver=\"%s\",size=\"%s\"} %d\n",
driver,
size,
count,
))
}
}
}
// Collect installed packages
var packageList cozyv1alpha1.PackageList
if err := c.reader.List(ctx, &packageList); err != nil {
logger.Info(fmt.Sprintf("Failed to list Packages: %v", err))
} else {
for _, pkg := range packageList.Items {
variant := pkg.Spec.Variant
if variant == "" {
variant = "default"
}
metrics.WriteString(fmt.Sprintf(
"cozy_package_info{name=\"%s\",variant=\"%s\"} 1\n",
pkg.Name,
variant,
))
}
}
// Send metrics
if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
}
}
// sendMetrics sends collected metrics to the configured endpoint
func (c *OperatorCollector) sendMetrics(clusterID, metrics string) error {
req, err := http.NewRequest("POST", c.config.Endpoint, bytes.NewBufferString(metrics))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "text/plain")
req.Header.Set("X-Cluster-ID", clusterID)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
return nil
}

View File

@@ -1,7 +1,7 @@
OUT=../../_out/repos/apps
CHARTS := $(shell find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}')
include ../../hack/common-envs.mk
include ../../scripts/common-envs.mk
repo:
rm -rf "$(OUT)"

View File

@@ -1,4 +1,4 @@
include ../../../hack/package.mk
include ../../../scripts/package.mk
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md

View File

@@ -2,13 +2,16 @@ apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-system
labels:
sharding.fluxcd.io/key: tenants
spec:
chartRef:
kind: ExternalArtifact
name: cozystack-bucket-application-default-bucket-system
namespace: cozy-system
chart:
spec:
chart: cozy-bucket
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
interval: 5m
timeout: 10m
install:

View File

@@ -1,7 +1,7 @@
CLICKHOUSE_BACKUP_TAG = $(shell awk '$$0 ~ /^version:/ {print $$2}' Chart.yaml)
include ../../../hack/common-envs.mk
include ../../../hack/package.mk
include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md

View File

@@ -1,4 +1,4 @@
include ../../../hack/package.mk
include ../../../scripts/package.mk
generate:
cozyvalues-gen -v values.yaml -s values.schema.json -r README.md

Some files were not shown because too many files have changed in this diff Show More