Compare commits


1 Commit

Author: Timofei Larkin
SHA1: ec33f6b980
Message: Draft: generate cozystack values
Signed-off-by: Timofei Larkin <lllamnyp@gmail.com>
Date: 2026-01-05 19:16:45 +03:00
1047 changed files with 21317 additions and 80058 deletions

.github/CODEOWNERS vendored
View File

@@ -1 +1 @@
* @kvaps @lllamnyp @lexfrei @androndo @IvanHunters
* @kvaps @lllamnyp @nbykov0

View File

@@ -71,6 +71,12 @@ jobs:
name: pr-patch
path: _out/assets/pr.patch
- name: Upload installer
uses: actions/upload-artifact@v4
with:
name: cozystack-installer
path: _out/assets/cozystack-installer.yaml
- name: Upload Talos image
uses: actions/upload-artifact@v4
with:
@@ -82,7 +88,8 @@ jobs:
runs-on: ubuntu-latest
if: contains(github.event.pull_request.labels.*.name, 'release')
outputs:
disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
steps:
- name: Checkout code
@@ -125,18 +132,19 @@ jobs:
return;
}
const find = (n) => draft.assets.find(a => a.name === n)?.id;
const diskId = find('nocloud-amd64.raw.xz');
if (!diskId) {
const installerId = find('cozystack-installer.yaml');
const diskId = find('nocloud-amd64.raw.xz');
if (!installerId || !diskId) {
core.setFailed('Required assets missing in draft release');
return;
}
core.setOutput('disk_id', diskId);
core.setOutput('installer_id', installerId);
core.setOutput('disk_id', diskId);
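For orientation, the asset-resolution script above amounts to: locate the draft release, then look up the two required asset IDs by name. A rough Go equivalent using the go-github client (not part of this change; selecting the right draft, e.g. by tag, is elided here):

```go
package release

import (
	"context"
	"fmt"

	"github.com/google/go-github/v62/github"
)

// findDraftAssetIDs mirrors the github-script step above: it finds a draft
// release and returns the IDs of the installer and disk-image assets.
func findDraftAssetIDs(ctx context.Context, gh *github.Client, owner, repo string) (installerID, diskID int64, err error) {
	releases, _, err := gh.Repositories.ListReleases(ctx, owner, repo, nil)
	if err != nil {
		return 0, 0, err
	}
	for _, rel := range releases {
		if !rel.GetDraft() {
			continue
		}
		find := func(name string) int64 {
			for _, a := range rel.Assets {
				if a.GetName() == name {
					return a.GetID()
				}
			}
			return 0
		}
		installerID = find("cozystack-installer.yaml")
		diskID = find("nocloud-amd64.raw.xz")
		if installerID == 0 || diskID == 0 {
			return 0, 0, fmt.Errorf("required assets missing in draft release")
		}
		return installerID, diskID, nil
	}
	return 0, 0, fmt.Errorf("no draft release found")
}
```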
e2e:
name: "E2E Tests"
runs-on: ${{ contains(github.event.pull_request.labels.*.name, 'debug') && 'self-hosted' || 'oracle-vm-24cpu-96gb-x86-64' }}
#runs-on: [oracle-vm-32cpu-128gb-x86-64]
prepare_env:
name: "Prepare environment"
runs-on: [self-hosted]
permissions:
contents: read
packages: read
@@ -182,7 +190,7 @@ jobs:
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
# ▸ Prepare environment
# ▸ Start actual job steps
- name: Prepare workspace
run: |
rm -rf /tmp/$SANDBOX_NAME
@@ -202,7 +210,47 @@ jobs:
done
echo "✅ The task completed successfully after $attempt attempts"
# ▸ Install Cozystack
install_cozystack:
name: "Install Cozystack"
runs-on: [self-hosted]
permissions:
contents: read
packages: read
needs: ["prepare_env", "resolve_assets"]
if: ${{ always() && needs.prepare_env.result == 'success' }}
steps:
- name: Prepare _out/assets directory
run: mkdir -p _out/assets
# ▸ Regular PR path – download artefacts produced by the *build* job
- name: "Download installer (regular PR)"
if: "!contains(github.event.pull_request.labels.*.name, 'release')"
uses: actions/download-artifact@v4
with:
name: cozystack-installer
path: _out/assets
# ▸ Release PR path – fetch artefacts from the corresponding draft release
- name: Download assets from draft release (release PR)
if: contains(github.event.pull_request.labels.*.name, 'release')
run: |
mkdir -p _out/assets
curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
-o _out/assets/cozystack-installer.yaml \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
env:
GH_PAT: ${{ secrets.GH_PAT }}
# ▸ Start actual job steps
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Sync _out/assets directory
run: |
mkdir -p /tmp/$SANDBOX_NAME/_out/assets
mv _out/assets/* /tmp/$SANDBOX_NAME/_out/assets/
- name: Install Cozystack into sandbox
run: |
cd /tmp/$SANDBOX_NAME
@@ -215,77 +263,107 @@ jobs:
fi
echo "❌ Attempt $attempt failed, retrying..."
done
echo "✅ The task completed successfully after $attempt attempts"
echo "✅ The task completed successfully after $attempt attempts."
- name: Run OpenAPI tests
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-openapi
# ▸ Run E2E tests
- name: Run E2E tests
id: e2e_tests
run: |
cd /tmp/$SANDBOX_NAME
failed_tests=""
for app in $(ls hack/e2e-apps/*.bats | xargs -n1 basename | cut -d. -f1); do
echo "::group::Testing $app"
attempt=0
success=false
until [ $attempt -ge 3 ]; do
if make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-$app; then
success=true
break
fi
attempt=$((attempt + 1))
echo "❌ Attempt $attempt failed, retrying..."
done
if [ "$success" = true ]; then
echo "✅ Test $app completed successfully"
else
echo "❌ Test $app failed after $attempt attempts"
failed_tests="$failed_tests $app"
fi
echo "::endgroup::"
done
if [ -n "$failed_tests" ]; then
echo "❌ Failed tests:$failed_tests"
exit 1
fi
echo "✅ All E2E tests passed"
detect_test_matrix:
name: "Detect e2e test matrix"
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set.outputs.matrix }}
# ▸ Collect debug information (always runs)
- name: Collect report
if: always()
steps:
- uses: actions/checkout@v4
- id: set
run: |
apps=$(ls hack/e2e-apps/*.bats | cut -f3 -d/ | cut -f1 -d. | jq -R | jq -cs)
echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
test_apps:
strategy:
fail-fast: false
matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
name: Test ${{ matrix.app }}
runs-on: [self-hosted]
needs: [install_cozystack,detect_test_matrix]
if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}
steps:
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: E2E Apps
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report || true
attempt=0
until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}; do
attempt=$((attempt + 1))
if [ $attempt -ge 3 ]; then
echo "❌ Attempt $attempt failed, exiting..."
exit 1
fi
echo "❌ Attempt $attempt failed, retrying..."
done
echo "✅ The task completed successfully after $attempt attempts"
collect_debug_information:
name: Collect debug information
runs-on: [self-hosted]
needs: [test_apps]
if: ${{ always() }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Collect report
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report
- name: Upload cozyreport.tgz
if: always()
uses: actions/upload-artifact@v4
with:
name: cozyreport
path: /tmp/${{ env.SANDBOX_NAME }}/_out/cozyreport.tgz
- name: Collect images list
if: always()
run: |
cd /tmp/$SANDBOX_NAME
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images || true
make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images
- name: Upload image list
if: always()
uses: actions/upload-artifact@v4
with:
name: image-list
path: /tmp/${{ env.SANDBOX_NAME }}/_out/images.txt
# ▸ Tear down environment (always runs)
cleanup:
name: Tear down environment
runs-on: [self-hosted]
needs: [collect_debug_information]
if: ${{ always() && needs.test_apps.result == 'success' }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Set sandbox ID
run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
- name: Tear down sandbox
if: always()
run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete || true
run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete
- name: Remove workspace
if: always()
run: rm -rf /tmp/$SANDBOX_NAME

View File

@@ -213,160 +213,3 @@ jobs:
} else {
console.log(`PR already exists from ${head} to ${base}`);
}
generate-changelog:
name: Generate Changelog
runs-on: [self-hosted]
needs: [prepare-release]
permissions:
contents: write
pull-requests: write
if: needs.prepare-release.result == 'success'
steps:
- name: Parse tag
id: tag
uses: actions/github-script@v7
with:
script: |
const ref = context.ref.replace('refs/tags/', '');
const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
if (!m) {
core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
return;
}
const version = m[1] + (m[2] ?? '');
core.setOutput('version', version);
core.setOutput('tag', ref);
- name: Checkout main branch
uses: actions/checkout@v4
with:
ref: main
fetch-depth: 0
fetch-tags: true
token: ${{ secrets.GH_PAT }}
- name: Check if changelog already exists
id: check_changelog
run: |
CHANGELOG_FILE="docs/changelogs/v${{ steps.tag.outputs.version }}.md"
if [ -f "$CHANGELOG_FILE" ]; then
echo "exists=true" >> $GITHUB_OUTPUT
echo "Changelog file $CHANGELOG_FILE already exists"
else
echo "exists=false" >> $GITHUB_OUTPUT
echo "Changelog file $CHANGELOG_FILE does not exist"
fi
- name: Setup Node.js
if: steps.check_changelog.outputs.exists == 'false'
uses: actions/setup-node@v4
with:
node-version: 22
- name: Install GitHub Copilot CLI
if: steps.check_changelog.outputs.exists == 'false'
run: npm i -g @github/copilot
- name: Generate changelog using AI
if: steps.check_changelog.outputs.exists == 'false'
env:
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
GH_TOKEN: ${{ secrets.GH_PAT }}
run: |
copilot --prompt "prepare changelog file for tagged release v${{ steps.tag.outputs.version }}, use @docs/agents/changelog.md for it. Create the changelog file at docs/changelogs/v${{ steps.tag.outputs.version }}.md" \
--allow-all-tools --allow-all-paths < /dev/null
- name: Create changelog branch and commit
if: steps.check_changelog.outputs.exists == 'false'
env:
GH_PAT: ${{ secrets.GH_PAT }}
run: |
git config user.name "cozystack-bot"
git config user.email "217169706+cozystack-bot@users.noreply.github.com"
git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
CHANGELOG_FILE="docs/changelogs/v${{ steps.tag.outputs.version }}.md"
CHANGELOG_BRANCH="changelog-v${{ steps.tag.outputs.version }}"
if [ -f "$CHANGELOG_FILE" ]; then
# Fetch latest main branch
git fetch origin main
# Delete local branch if it exists
git branch -D "$CHANGELOG_BRANCH" 2>/dev/null || true
# Create and checkout new branch from main
git checkout -b "$CHANGELOG_BRANCH" origin/main
# Add and commit changelog
git add "$CHANGELOG_FILE"
if git diff --staged --quiet; then
echo "⚠️ No changes to commit (file may already be committed)"
else
git commit -m "docs: add changelog for v${{ steps.tag.outputs.version }}" -s
echo "✅ Changelog committed to branch $CHANGELOG_BRANCH"
fi
# Push the branch (force push to update if it exists)
git push -f origin "$CHANGELOG_BRANCH"
else
echo "⚠️ Changelog file was not generated"
exit 1
fi
- name: Create PR for changelog
if: steps.check_changelog.outputs.exists == 'false'
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GH_PAT }}
script: |
const version = '${{ steps.tag.outputs.version }}';
const changelogBranch = `changelog-v${version}`;
const baseBranch = 'main';
// Check if PR already exists
const prs = await github.rest.pulls.list({
owner: context.repo.owner,
repo: context.repo.repo,
head: `${context.repo.owner}:${changelogBranch}`,
base: baseBranch,
state: 'open'
});
if (prs.data.length > 0) {
const pr = prs.data[0];
console.log(`PR #${pr.number} already exists for changelog branch ${changelogBranch}`);
// Update PR body with latest info
const body = `This PR adds the changelog for release \`v${version}\`.\n\n✅ Changelog has been automatically generated in \`docs/changelogs/v${version}.md\`.`;
await github.rest.pulls.update({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: pr.number,
body: body
});
console.log(`Updated existing PR #${pr.number}`);
} else {
// Create new PR
const pr = await github.rest.pulls.create({
owner: context.repo.owner,
repo: context.repo.repo,
head: changelogBranch,
base: baseBranch,
title: `docs: add changelog for v${version}`,
body: `This PR adds the changelog for release \`v${version}\`.\n\n✅ Changelog has been automatically generated in \`docs/changelogs/v${version}.md\`.`,
draft: false
});
// Add label if needed
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr.data.number,
labels: ['documentation', 'automated']
});
console.log(`Created PR #${pr.data.number} for changelog`);
}

View File

@@ -1,6 +1,4 @@
.PHONY: manifests assets unit-tests helm-unit-tests verify-crds
include hack/common-envs.mk
.PHONY: manifests repos assets unit-tests helm-unit-tests
build-deps:
@command -V find docker skopeo jq gh helm > /dev/null
@@ -11,17 +9,15 @@ build-deps:
build: build-deps
make -C packages/apps/http-cache image
make -C packages/apps/mariadb image
make -C packages/apps/mysql image
make -C packages/apps/clickhouse image
make -C packages/apps/kubernetes image
make -C packages/system/monitoring image
make -C packages/extra/monitoring image
make -C packages/system/cozystack-api image
make -C packages/system/cozystack-controller image
make -C packages/system/backup-controller image
make -C packages/system/backupstrategy-controller image
make -C packages/system/lineage-controller-webhook image
make -C packages/system/cilium image
make -C packages/system/linstor image
make -C packages/system/kubeovn-webhook image
make -C packages/system/kubeovn-plunger image
make -C packages/system/dashboard image
@@ -29,62 +25,30 @@ build: build-deps
make -C packages/system/kamaji image
make -C packages/system/bucket image
make -C packages/system/objectstorage-controller image
make -C packages/system/grafana-operator image
make -C packages/core/testing image
make -C packages/core/talos image
make -C packages/core/platform image
make -C packages/core/installer image
make manifests
repos:
rm -rf _out
make -C packages/system repo
make -C packages/apps repo
make -C packages/extra repo
manifests:
mkdir -p _out/assets
cat packages/core/installer/crds/*.yaml > _out/assets/cozystack-crds.yaml
# Talos variant (default)
helm template installer packages/core/installer -n cozy-system \
-s templates/cozystack-operator.yaml \
-s templates/packagesource.yaml \
> _out/assets/cozystack-operator-talos.yaml
# Generic Kubernetes variant (k3s, kubeadm, RKE2)
helm template installer packages/core/installer -n cozy-system \
--set cozystackOperator.variant=generic \
--set cozystack.apiServerHost=REPLACE_ME \
-s templates/cozystack-operator.yaml \
-s templates/packagesource.yaml \
> _out/assets/cozystack-operator-generic.yaml
# Hosted variant (managed Kubernetes)
helm template installer packages/core/installer -n cozy-system \
--set cozystackOperator.variant=hosted \
-s templates/cozystack-operator.yaml \
-s templates/packagesource.yaml \
> _out/assets/cozystack-operator-hosted.yaml
(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
cozypkg:
go build -ldflags "-X github.com/cozystack/cozystack/cmd/cozypkg/cmd.Version=v$(COZYSTACK_VERSION)" -o _out/bin/cozypkg ./cmd/cozypkg
assets: assets-talos assets-cozypkg
assets-talos:
assets:
make -C packages/core/talos assets
assets-cozypkg: assets-cozypkg-linux-amd64 assets-cozypkg-linux-arm64 assets-cozypkg-darwin-amd64 assets-cozypkg-darwin-arm64 assets-cozypkg-windows-amd64 assets-cozypkg-windows-arm64
(cd _out/assets/ && sha256sum cozypkg-*.tar.gz) > _out/assets/cozypkg-checksums.txt
assets-cozypkg-%:
$(eval EXT := $(if $(filter windows,$(firstword $(subst -, ,$*))),.exe,))
mkdir -p _out/assets
GOOS=$(firstword $(subst -, ,$*)) GOARCH=$(lastword $(subst -, ,$*)) go build -ldflags "-X github.com/cozystack/cozystack/cmd/cozypkg/cmd.Version=v$(COZYSTACK_VERSION)" -o _out/bin/cozypkg-$*/cozypkg$(EXT) ./cmd/cozypkg
cp LICENSE _out/bin/cozypkg-$*/LICENSE
tar -C _out/bin/cozypkg-$* -czf _out/assets/cozypkg-$*.tar.gz LICENSE cozypkg$(EXT)
test:
make -C packages/core/testing apply
make -C packages/core/testing test
verify-crds:
@diff --recursive packages/core/installer/crds/ internal/crdinstall/manifests/ --exclude='.*' \
|| (echo "ERROR: CRD manifests out of sync. Run 'make generate' to fix." && exit 1)
unit-tests: helm-unit-tests verify-crds
unit-tests: helm-unit-tests
helm-unit-tests:
hack/helm-unit-tests.sh

View File

@@ -56,8 +56,6 @@ type VeleroSpec struct {
// templated from a Velero backup strategy.
type VeleroTemplate struct {
Spec velerov1.BackupSpec `json:"spec"`
// +optional
RestoreSpec *velerov1.RestoreSpec `json:"restoreSpec,omitempty"`
}
type VeleroStatus struct {

View File

@@ -21,7 +21,6 @@ limitations under the License.
package v1alpha1
import (
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -224,11 +223,6 @@ func (in *VeleroStatus) DeepCopy() *VeleroStatus {
func (in *VeleroTemplate) DeepCopyInto(out *VeleroTemplate) {
*out = *in
in.Spec.DeepCopyInto(&out.Spec)
if in.RestoreSpec != nil {
in, out := &in.RestoreSpec, &out.RestoreSpec
*out = new(velerov1.RestoreSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VeleroTemplate.

View File

@@ -100,13 +100,13 @@ Describe **when**, **how**, and **where** to back up a specific managed applicat
```go
type PlanSpec struct {
// Application to back up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and other parameters (e.g. storage reference).
// The BackupClass will be resolved to determine the appropriate strategy and parameters
// based on the ApplicationRef.
BackupClassName string `json:"backupClassName"`
// Where backups should be stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// Driver-specific BackupStrategy to use.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// When backups should run.
Schedule PlanSchedule `json:"schedule"`
@@ -145,12 +145,12 @@ Core Plan controller:
* Create a `BackupJob` in the same namespace:
* `spec.planRef.name = plan.Name`
* `spec.applicationRef = plan.spec.applicationRef` (normalized with default apiGroup if not specified)
* `spec.backupClassName = plan.spec.backupClassName`
* `spec.applicationRef = plan.spec.applicationRef`
* `spec.storageRef = plan.spec.storageRef`
* `spec.strategyRef = plan.spec.strategyRef`
* `spec.triggeredBy = "Plan"`
* Set `ownerReferences` so the `BackupJob` is owned by the `Plan`.
**Note:** The `BackupJob` controller resolves the `BackupClass` to determine the appropriate strategy and parameters, based on the `ApplicationRef`. The strategy template is processed with a context containing the `Application` object and `Parameters` from the `BackupClass`.
The Plan controller does **not**:
* Execute backups itself.
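A minimal sketch of the Plan→BackupJob mapping listed above, using the new `storageRef`/`strategyRef` shape. The package alias and import path are assumed, not taken from this change:

```go
package plancontroller

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// Import path assumed; adjust to this repo's actual api package.
	backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)

// buildBackupJob materializes a BackupJob from a Plan per the mapping above.
func buildBackupJob(plan *backupsv1alpha1.Plan) *backupsv1alpha1.BackupJob {
	return &backupsv1alpha1.BackupJob{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: plan.Name + "-",
			Namespace:    plan.Namespace,
			// Owned by the Plan, so it is garbage-collected with it.
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(plan, backupsv1alpha1.GroupVersion.WithKind("Plan")),
			},
		},
		Spec: backupsv1alpha1.BackupJobSpec{
			PlanRef:        &corev1.LocalObjectReference{Name: plan.Name},
			ApplicationRef: plan.Spec.ApplicationRef,
			StorageRef:     plan.Spec.StorageRef,
			StrategyRef:    plan.Spec.StrategyRef,
			TriggeredBy:    "Plan",
		},
	}
}
```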
@@ -159,64 +159,17 @@ The Plan controller does **not**:
---
### 4.2 BackupClass
### 4.2 Storage
**Group/Kind**
`backups.cozystack.io/v1alpha1, Kind=BackupClass`
**API Shape**
**Purpose**
Define a class of backup configurations that encapsulate strategy and parameters per application type. `BackupClass` is a cluster-scoped resource that allows admins to configure backup strategies and parameters in a reusable way.
TBD
**Key fields (spec)**
**Storage usage**
```go
type BackupClassSpec struct {
// Strategies is a list of backup strategies, each matching a specific application type.
Strategies []BackupClassStrategy `json:"strategies"`
}
type BackupClassStrategy struct {
// StrategyRef references the driver-specific BackupStrategy (e.g., Velero).
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Application specifies which application types this strategy applies to.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
Application ApplicationSelector `json:"application"`
// Parameters holds strategy-specific parameters, like storage reference.
// Common parameters include:
// - backupStorageLocationName: Name of Velero BackupStorageLocation
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
}
type ApplicationSelector struct {
// APIGroup is the API group of the application.
// If not specified, defaults to "apps.cozystack.io".
// +optional
APIGroup *string `json:"apiGroup,omitempty"`
// Kind is the kind of the application (e.g., VirtualMachine, MariaDB).
Kind string `json:"kind"`
}
```
**BackupClass resolution**
* When a `BackupJob` or `Plan` references a `BackupClass` via `backupClassName`, the controller:
1. Fetches the `BackupClass` by name.
2. Matches the `ApplicationRef` against strategies in the `BackupClass`:
* Normalizes `ApplicationRef.apiGroup` (defaults to `"apps.cozystack.io"` if not specified).
* Finds a strategy where `ApplicationSelector` matches the `ApplicationRef` (apiGroup and kind).
3. Returns the matched `StrategyRef` and `Parameters`.
* Strategy templates (e.g., Velero's `backupTemplate.spec`) are processed with a context containing:
* `Application`: The application object being backed up.
* `Parameters`: The parameters from the matched `BackupClassStrategy`.
**Parameters**
* Parameters are passed via `Parameters` in the `BackupClass` (e.g., `backupStorageLocationName` for Velero).
* The driver uses these parameters to resolve the actual resources (e.g., Velero's `BackupStorageLocation` CRD).
* `Plan` and `BackupJob` reference `Storage` via `TypedLocalObjectReference`.
* Drivers read `Storage` to know how/where to store or read artifacts.
* Core treats `Storage` spec as opaque; it does not directly talk to S3 or buckets.
---
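Because core treats the `Storage` spec as opaque, a driver would typically fetch it dynamically rather than through a typed client. A controller-runtime sketch under that assumption (the version string and helper shape are illustrative):

```go
package driver

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resolveStorage fetches the Storage object referenced from a Plan or
// BackupJob. Core never interprets its spec; the driver does.
func resolveStorage(ctx context.Context, c client.Client, ns string, ref corev1.TypedLocalObjectReference) (*unstructured.Unstructured, error) {
	apiGroup := ""
	if ref.APIGroup != nil {
		apiGroup = *ref.APIGroup
	}
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{
		Group:   apiGroup,
		Version: "v1alpha1", // assumed
		Kind:    ref.Kind,
	})
	return u, c.Get(ctx, client.ObjectKey{Namespace: ns, Name: ref.Name}, u)
}
```

The driver, not core, decides how to interpret the returned object's spec.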
@@ -236,13 +189,16 @@ type BackupJobSpec struct {
PlanRef *corev1.LocalObjectReference `json:"planRef,omitempty"`
// Application to back up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and related parameters
// The BackupClass will be resolved to determine the appropriate strategy and parameters
// based on the ApplicationRef.
BackupClassName string `json:"backupClassName"`
// Storage to use.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// Driver-specific BackupStrategy to use.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Informational: what triggered this run ("Plan", "Manual", etc.).
TriggeredBy string `json:"triggeredBy,omitempty"`
}
```
@@ -267,9 +223,7 @@ type BackupJobStatus struct {
* Each driver controller:
* Watches `BackupJob`.
* Resolves the `BackupClass` referenced by `spec.backupClassName`.
* Matches the `ApplicationRef` against strategies in the `BackupClass` to find the appropriate strategy.
* Reconciles runs where the resolved strategy's `apiGroup/kind` matches its **strategy type(s)**.
* Reconciles runs where `spec.strategyRef.apiGroup/kind` matches its **strategy type(s)**.
* Driver responsibilities:
1. On first reconcile:
@@ -278,12 +232,7 @@ type BackupJobStatus struct {
* Set `status.phase = Running`.
2. Resolve inputs:
* Resolve `BackupClass` from `spec.backupClassName`.
* Match `ApplicationRef` against `BackupClass` strategies to get `StrategyRef` and `Parameters`.
* Read `Strategy` (driver-owned CRD) from `StrategyRef`.
* Read `Application` from `ApplicationRef`.
* Extract parameters from `Parameters` (e.g., `backupStorageLocationName` for Velero).
* Process strategy template with context: `Application` object and `Parameters` from `BackupClass`.
* Read `Strategy` (driver-owned CRD), `Storage`, `Application`, optionally `Plan`.
3. Execute backup logic (implementation-specific).
4. On success:
@@ -315,14 +264,13 @@ Represent a single **backup artifact** for a given application, decoupled from a
type BackupSpec struct {
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
PlanRef *corev1.LocalObjectReference `json:"planRef,omitempty"`
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
TakenAt metav1.Time `json:"takenAt"`
DriverMetadata map[string]string `json:"driverMetadata,omitempty"`
}
```
**Note:** Parameters are not stored directly in `Backup`. Instead, they are resolved from `BackupClass` parameters when the backup was created. The storage location is managed by the driver (e.g., Velero's `BackupStorageLocation`) and referenced via parameters in the `BackupClass`.
**Key fields (status)**
```go
@@ -342,8 +290,7 @@ type BackupStatus struct {
* Creates a `Backup` in the same namespace (typically owned by the `BackupJob`).
* Populates `spec` fields with:
* The application reference.
* The strategy reference (resolved from `BackupClass` during `BackupJob` execution).
* The application, storage, strategy references.
* `takenAt`.
* Optional `driverMetadata`.
* Sets `status` with:
@@ -359,8 +306,6 @@ type BackupStatus struct {
* Anchor `RestoreJob` operations.
* Implement higher-level policies (retention) if needed.
**Note:** Parameters are resolved from `BackupClass` when the `BackupJob` is created. The driver uses these parameters to determine where to store backups. The storage location itself is managed by the driver (e.g., Velero's `BackupStorageLocation` CRD) and is not directly referenced in the `Backup` resource. When restoring, the driver resolves the storage location from the original `BackupClass` parameters or from the driver's own metadata.
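For concreteness, the `Backup` a driver might record on success, carrying the references forward from the owning `BackupJob`. Imports as in the Plan sketch above; the helper shape is assumed:

```go
// newBackup records a finished artifact, copying the application, storage,
// and strategy references from the owning BackupJob as described above.
func newBackup(job *backupsv1alpha1.BackupJob, driverMeta map[string]string) *backupsv1alpha1.Backup {
	return &backupsv1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: job.Name + "-",
			Namespace:    job.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(job, backupsv1alpha1.GroupVersion.WithKind("BackupJob")),
			},
		},
		Spec: backupsv1alpha1.BackupSpec{
			ApplicationRef: job.Spec.ApplicationRef,
			PlanRef:        job.Spec.PlanRef,
			StorageRef:     job.Spec.StorageRef,
			StrategyRef:    job.Spec.StrategyRef,
			TakenAt:        metav1.Now(),
			DriverMetadata: driverMeta,
		},
	}
}
```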
---
### 4.5 RestoreJob
@@ -408,13 +353,13 @@ type RestoreJobStatus struct {
* Determines effective:
* **Strategy**: `backup.spec.strategyRef`.
* **Storage**: Resolved from driver metadata or `BackupClass` parameters (e.g., `backupStorageLocationName` stored in `driverMetadata` or resolved from the original `BackupClass`).
* **Storage**: `backup.spec.storageRef`.
* **Target application**: `spec.targetApplicationRef` or `backup.spec.applicationRef`.
* If effective strategy's GVK is one of its supported strategy types → driver is responsible.
3. Behaviour:
* On first reconcile, set `status.startedAt` and `phase = Running`.
* Resolve `Backup`, storage location (from driver metadata or `BackupClass`), `Strategy`, target application.
* Resolve `Backup`, `Storage`, `Strategy`, target application.
* Execute restore logic (implementation-specific).
* On success:
@@ -469,10 +414,8 @@ The Cozystack backups core API:
* Uses a single group, `backups.cozystack.io`, for all core CRDs.
* Cleanly separates:
* **When** (Plan schedule) – core-owned.
* **How & where** (BackupClass) – central configuration unit that encapsulates strategy and parameters (e.g. storage reference) per application type, resolved per BackupJob/Plan.
* **Execution** (BackupJob) – created by Plan when schedule fires, resolves BackupClass to get strategy and parameters, then delegates to driver.
* **When & where** (Plan + Storage) – core-owned.
* **What backup artifacts exist** (Backup) – driver-created but cluster-visible.
* **Restore lifecycle** (RestoreJob) – shared contract boundary.
* **Execution lifecycle** (BackupJob, RestoreJob) – shared contract boundary.
* Allows multiple strategy drivers to implement backup/restore logic without entangling their implementation with the core API.

View File

@@ -57,6 +57,10 @@ type BackupSpec struct {
// +optional
PlanRef *corev1.LocalObjectReference `json:"planRef,omitempty"`
// StorageRef refers to the Storage object that describes where the backup
// artifact is stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// StrategyRef refers to the driver-specific BackupStrategy that was used
// to create this backup. This allows the driver to later perform restores.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`

View File

@@ -1,85 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
// Package v1alpha1 defines backups.cozystack.io API types.
//
// Group: backups.cozystack.io
// Version: v1alpha1
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
func init() {
SchemeBuilder.Register(func(s *runtime.Scheme) error {
s.AddKnownTypes(GroupVersion,
&BackupClass{},
&BackupClassList{},
)
return nil
})
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster
// +kubebuilder:subresource:status
// BackupClass defines a class of backup configurations that can be referenced
// by BackupJob and Plan resources. It encapsulates strategy and storage configuration
// per application type.
type BackupClass struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BackupClassSpec `json:"spec,omitempty"`
Status BackupClassStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// BackupClassList contains a list of BackupClasses.
type BackupClassList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []BackupClass `json:"items"`
}
// BackupClassSpec defines the desired state of a BackupClass.
type BackupClassSpec struct {
// Strategies is a list of backup strategies, each matching a specific application type.
Strategies []BackupClassStrategy `json:"strategies"`
}
// BackupClassStrategy defines a backup strategy for a specific application type.
type BackupClassStrategy struct {
// StrategyRef references the driver-specific BackupStrategy (e.g., Velero).
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Application specifies which application types this strategy applies to.
Application ApplicationSelector `json:"application"`
// Parameters holds strategy-specific and storage-specific parameters.
// Common parameters include:
// - backupStorageLocationName: Name of Velero BackupStorageLocation
// +optional
Parameters map[string]string `json:"parameters,omitempty"`
}
// ApplicationSelector specifies which application types a strategy applies to.
type ApplicationSelector struct {
// APIGroup is the API group of the application.
// If not specified, defaults to "apps.cozystack.io".
// +optional
APIGroup *string `json:"apiGroup,omitempty"`
// Kind is the kind of the application (e.g., VirtualMachine, MariaDB).
Kind string `json:"kind"`
}
// BackupClassStatus defines the observed state of a BackupClass.
type BackupClassStatus struct {
// Conditions represents the latest available observations of a BackupClass's state.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
}

View File

@@ -24,10 +24,6 @@ func init() {
const (
OwningJobNameLabel = thisGroup + "/owned-by.BackupJobName"
OwningJobNamespaceLabel = thisGroup + "/owned-by.BackupJobNamespace"
// DefaultApplicationAPIGroup is the default API group for applications
// when not specified in ApplicationRef or ApplicationSelector.
DefaultApplicationAPIGroup = "apps.cozystack.io"
)
// BackupJobPhase represents the lifecycle phase of a BackupJob.
@@ -50,16 +46,15 @@ type BackupJobSpec struct {
// ApplicationRef holds a reference to the managed application whose state
// is being backed up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and storage configuration.
// The BackupClass will be resolved to determine the appropriate strategy and storage
// based on the ApplicationRef.
// This field is immutable once the BackupJob is created.
// +kubebuilder:validation:MinLength=1
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="backupClassName is immutable"
BackupClassName string `json:"backupClassName"`
// StorageRef holds a reference to the Storage object that describes where
// the backup will be stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// StrategyRef holds a reference to the driver-specific BackupStrategy object
// that describes how the backup should be created.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
}
// BackupJobStatus represents the observed state of a BackupJob.
@@ -119,13 +114,3 @@ type BackupJobList struct {
metav1.ListMeta `json:"metadata,omitempty"`
Items []BackupJob `json:"items"`
}
// NormalizeApplicationRef sets the default apiGroup to DefaultApplicationAPIGroup if it's not specified.
// This function is exported so it can be used by other packages (e.g., controllers, factories).
func NormalizeApplicationRef(ref corev1.TypedLocalObjectReference) corev1.TypedLocalObjectReference {
if ref.APIGroup == nil || *ref.APIGroup == "" {
apiGroup := DefaultApplicationAPIGroup
ref.APIGroup = &apiGroup
}
return ref
}

View File

@@ -65,13 +65,15 @@ type PlanList struct {
type PlanSpec struct {
// ApplicationRef holds a reference to the managed application,
// whose state and configuration must be backed up.
// If apiGroup is not specified, it defaults to "apps.cozystack.io".
ApplicationRef corev1.TypedLocalObjectReference `json:"applicationRef"`
// BackupClassName references a BackupClass that contains strategy and storage configuration.
// The BackupClass will be resolved to determine the appropriate strategy and storage
// based on the ApplicationRef.
BackupClassName string `json:"backupClassName"`
// StorageRef holds a reference to the Storage object that
// describes the location where the backup will be stored.
StorageRef corev1.TypedLocalObjectReference `json:"storageRef"`
// StrategyRef holds a reference to the Strategy object that
// describes how a backup copy is to be created.
StrategyRef corev1.TypedLocalObjectReference `json:"strategyRef"`
// Schedule specifies when backup copies are created.
Schedule PlanSchedule `json:"schedule"`

View File

@@ -71,8 +71,6 @@ type RestoreJobStatus struct {
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",priority=0
// RestoreJob represents a single execution of a restore from a Backup.
type RestoreJob struct {

View File

@@ -26,26 +26,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationSelector) DeepCopyInto(out *ApplicationSelector) {
*out = *in
if in.APIGroup != nil {
in, out := &in.APIGroup, &out.APIGroup
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSelector.
func (in *ApplicationSelector) DeepCopy() *ApplicationSelector {
if in == nil {
return nil
}
out := new(ApplicationSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backup) DeepCopyInto(out *Backup) {
*out = *in
@@ -88,133 +68,6 @@ func (in *BackupArtifact) DeepCopy() *BackupArtifact {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClass) DeepCopyInto(out *BackupClass) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClass.
func (in *BackupClass) DeepCopy() *BackupClass {
if in == nil {
return nil
}
out := new(BackupClass)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupClass) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassList) DeepCopyInto(out *BackupClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]BackupClass, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassList.
func (in *BackupClassList) DeepCopy() *BackupClassList {
if in == nil {
return nil
}
out := new(BackupClassList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupClassList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassSpec) DeepCopyInto(out *BackupClassSpec) {
*out = *in
if in.Strategies != nil {
in, out := &in.Strategies, &out.Strategies
*out = make([]BackupClassStrategy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassSpec.
func (in *BackupClassSpec) DeepCopy() *BackupClassSpec {
if in == nil {
return nil
}
out := new(BackupClassSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassStatus) DeepCopyInto(out *BackupClassStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassStatus.
func (in *BackupClassStatus) DeepCopy() *BackupClassStatus {
if in == nil {
return nil
}
out := new(BackupClassStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupClassStrategy) DeepCopyInto(out *BackupClassStrategy) {
*out = *in
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
in.Application.DeepCopyInto(&out.Application)
if in.Parameters != nil {
in, out := &in.Parameters, &out.Parameters
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupClassStrategy.
func (in *BackupClassStrategy) DeepCopy() *BackupClassStrategy {
if in == nil {
return nil
}
out := new(BackupClassStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupJob) DeepCopyInto(out *BackupJob) {
*out = *in
@@ -283,6 +136,8 @@ func (in *BackupJobSpec) DeepCopyInto(out *BackupJobSpec) {
**out = **in
}
in.ApplicationRef.DeepCopyInto(&out.ApplicationRef)
in.StorageRef.DeepCopyInto(&out.StorageRef)
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupJobSpec.
@@ -371,6 +226,7 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = new(v1.LocalObjectReference)
**out = **in
}
in.StorageRef.DeepCopyInto(&out.StorageRef)
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
in.TakenAt.DeepCopyInto(&out.TakenAt)
if in.DriverMetadata != nil {
@@ -497,6 +353,8 @@ func (in *PlanSchedule) DeepCopy() *PlanSchedule {
func (in *PlanSpec) DeepCopyInto(out *PlanSpec) {
*out = *in
in.ApplicationRef.DeepCopyInto(&out.ApplicationRef)
in.StorageRef.DeepCopyInto(&out.StorageRef)
in.StrategyRef.DeepCopyInto(&out.StrategyRef)
out.Schedule = in.Schedule
}

View File

@@ -253,25 +253,3 @@ type FactoryList struct {
metav1.ListMeta `json:"metadata,omitempty"`
Items []Factory `json:"items"`
}
// -----------------------------------------------------------------------------
// CustomFormsOverrideMapping
// -----------------------------------------------------------------------------
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=cfomappings,scope=Cluster
// +kubebuilder:subresource:status
type CFOMapping struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ArbitrarySpec `json:"spec"`
Status CommonStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
type CFOMappingList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CFOMapping `json:"items"`
}

View File

@@ -69,9 +69,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&Factory{},
&FactoryList{},
&CFOMapping{},
&CFOMappingList{},
)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil

View File

@@ -159,65 +159,6 @@ func (in *BreadcrumbList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CFOMapping) DeepCopyInto(out *CFOMapping) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CFOMapping.
func (in *CFOMapping) DeepCopy() *CFOMapping {
if in == nil {
return nil
}
out := new(CFOMapping)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CFOMapping) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CFOMappingList) DeepCopyInto(out *CFOMappingList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CFOMapping, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CFOMappingList.
func (in *CFOMappingList) DeepCopy() *CFOMappingList {
if in == nil {
return nil
}
out := new(CFOMappingList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CFOMappingList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonStatus) DeepCopyInto(out *CommonStatus) {
*out = *in

View File

@@ -17,52 +17,69 @@ limitations under the License.
package v1alpha1
import (
helmv2 "github.com/fluxcd/helm-controller/api/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster
// ApplicationDefinition is the Schema for the applicationdefinitions API
type ApplicationDefinition struct {
// CozystackResourceDefinition is the Schema for the cozystackresourcedefinitions API
type CozystackResourceDefinition struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ApplicationDefinitionSpec `json:"spec,omitempty"`
Spec CozystackResourceDefinitionSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// ApplicationDefinitionList contains a list of ApplicationDefinitions
type ApplicationDefinitionList struct {
// CozystackResourceDefinitionList contains a list of CozystackResourceDefinitions
type CozystackResourceDefinitionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ApplicationDefinition `json:"items"`
Items []CozystackResourceDefinition `json:"items"`
}
func init() {
SchemeBuilder.Register(&ApplicationDefinition{}, &ApplicationDefinitionList{})
SchemeBuilder.Register(&CozystackResourceDefinition{}, &CozystackResourceDefinitionList{})
}
type ApplicationDefinitionSpec struct {
type CozystackResourceDefinitionSpec struct {
// Application configuration
Application ApplicationDefinitionApplication `json:"application"`
Application CozystackResourceDefinitionApplication `json:"application"`
// Release configuration
Release ApplicationDefinitionRelease `json:"release"`
Release CozystackResourceDefinitionRelease `json:"release"`
// Secret selectors
Secrets ApplicationDefinitionResources `json:"secrets,omitempty"`
Secrets CozystackResourceDefinitionResources `json:"secrets,omitempty"`
// Service selectors
Services ApplicationDefinitionResources `json:"services,omitempty"`
Services CozystackResourceDefinitionResources `json:"services,omitempty"`
// Ingress selectors
Ingresses ApplicationDefinitionResources `json:"ingresses,omitempty"`
Ingresses CozystackResourceDefinitionResources `json:"ingresses,omitempty"`
// Dashboard configuration for this resource
Dashboard *ApplicationDefinitionDashboard `json:"dashboard,omitempty"`
Dashboard *CozystackResourceDefinitionDashboard `json:"dashboard,omitempty"`
}
type ApplicationDefinitionApplication struct {
type CozystackResourceDefinitionChart struct {
// Name of the Helm chart
Name string `json:"name"`
// Source reference for the Helm chart
SourceRef SourceRef `json:"sourceRef"`
}
type SourceRef struct {
// Kind of the source reference
// +kubebuilder:default:="HelmRepository"
Kind string `json:"kind"`
// Name of the source reference
Name string `json:"name"`
// Namespace of the source reference
// +kubebuilder:default:="cozy-public"
Namespace string `json:"namespace"`
}
type CozystackResourceDefinitionApplication struct {
// Kind of the application, used for UI and API
Kind string `json:"kind"`
// OpenAPI schema for the application, used for API validation
@@ -73,16 +90,17 @@ type ApplicationDefinitionApplication struct {
Singular string `json:"singular"`
}
type ApplicationDefinitionRelease struct {
// Reference to the chart source
ChartRef *helmv2.CrossNamespaceSourceReference `json:"chartRef"`
type CozystackResourceDefinitionRelease struct {
// Helm chart configuration
// +optional
Chart CozystackResourceDefinitionChart `json:"chart,omitempty"`
// Labels for the release
Labels map[string]string `json:"labels,omitempty"`
// Prefix for the release name
Prefix string `json:"prefix"`
}
// ApplicationDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
// CozystackResourceDefinitionResourceSelector extends metav1.LabelSelector with resourceNames support.
// A resource matches this selector only if it satisfies ALL criteria:
// - Label selector conditions (matchExpressions and matchLabels)
// - AND has a name that matches one of the names in resourceNames (if specified)
@@ -105,7 +123,7 @@ type ApplicationDefinitionRelease struct {
// - "{{ .name }}-secret"
// - "{{ .kind }}-{{ .name }}-tls"
// - "specificname"
type ApplicationDefinitionResourceSelector struct {
type CozystackResourceDefinitionResourceSelector struct {
metav1.LabelSelector `json:",inline"`
// ResourceNames is a list of resource names to match
// If specified, the resource must have one of these exact names to match the selector
@@ -113,16 +131,16 @@ type ApplicationDefinitionResourceSelector struct {
ResourceNames []string `json:"resourceNames,omitempty"`
}
type ApplicationDefinitionResources struct {
type CozystackResourceDefinitionResources struct {
// Exclude contains an array of resource selectors that target resources.
// If a resource matches the selector in any of the elements in the array, it is
// hidden from the user, regardless of the matches in the include array.
Exclude []*ApplicationDefinitionResourceSelector `json:"exclude,omitempty"`
Exclude []*CozystackResourceDefinitionResourceSelector `json:"exclude,omitempty"`
// Include contains an array of resource selectors that target resources.
// If a resource matches the selector in any of the elements in the array, and
// matches none of the selectors in the exclude array that resource is marked
// as a tenant resource and is visible to users.
Include []*ApplicationDefinitionResourceSelector `json:"include,omitempty"`
Include []*CozystackResourceDefinitionResourceSelector `json:"include,omitempty"`
}
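// NOTE (illustrative, not part of this change): the include/exclude rule
// documented above boils down to "visible iff the resource matches some
// include selector and no exclude selector". A minimal sketch, comparing
// resourceNames literally (template expansion such as "{{ .name }}-secret"
// is handled elsewhere and omitted here); assumes k8s.io/apimachinery's
// metav1.LabelSelectorAsSelector and labels.Set.
//
//	func matches(sel *CozystackResourceDefinitionResourceSelector, objLabels map[string]string, name string) bool {
//		ls, err := metav1.LabelSelectorAsSelector(&sel.LabelSelector)
//		if err != nil || !ls.Matches(labels.Set(objLabels)) {
//			return false
//		}
//		if len(sel.ResourceNames) == 0 {
//			return true
//		}
//		for _, n := range sel.ResourceNames {
//			if n == name {
//				return true
//			}
//		}
//		return false
//	}
//
//	func visible(res CozystackResourceDefinitionResources, objLabels map[string]string, name string) bool {
//		for _, sel := range res.Exclude {
//			if sel != nil && matches(sel, objLabels, name) {
//				return false
//			}
//		}
//		for _, sel := range res.Include {
//			if sel != nil && matches(sel, objLabels, name) {
//				return true
//			}
//		}
//		return false
//	}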
// ---- Dashboard types ----
@@ -139,8 +157,8 @@ const (
DashboardTabYAML DashboardTab = "yaml"
)
// ApplicationDefinitionDashboard describes how this resource appears in the UI.
type ApplicationDefinitionDashboard struct {
// CozystackResourceDefinitionDashboard describes how this resource appears in the UI.
type CozystackResourceDefinitionDashboard struct {
// Human-readable name shown in the UI (e.g., "Bucket")
Singular string `json:"singular"`
// Plural human-readable name (e.g., "Buckets")

View File

@@ -21,232 +21,12 @@ limitations under the License.
package v1alpha1
import (
"github.com/fluxcd/helm-controller/api/v2"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinition) DeepCopyInto(out *ApplicationDefinition) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinition.
func (in *ApplicationDefinition) DeepCopy() *ApplicationDefinition {
if in == nil {
return nil
}
out := new(ApplicationDefinition)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ApplicationDefinition) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionApplication) DeepCopyInto(out *ApplicationDefinitionApplication) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionApplication.
func (in *ApplicationDefinitionApplication) DeepCopy() *ApplicationDefinitionApplication {
if in == nil {
return nil
}
out := new(ApplicationDefinitionApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionDashboard) DeepCopyInto(out *ApplicationDefinitionDashboard) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Tabs != nil {
in, out := &in.Tabs, &out.Tabs
*out = make([]DashboardTab, len(*in))
copy(*out, *in)
}
if in.KeysOrder != nil {
in, out := &in.KeysOrder, &out.KeysOrder
*out = make([][]string, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionDashboard.
func (in *ApplicationDefinitionDashboard) DeepCopy() *ApplicationDefinitionDashboard {
if in == nil {
return nil
}
out := new(ApplicationDefinitionDashboard)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionList) DeepCopyInto(out *ApplicationDefinitionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ApplicationDefinition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionList.
func (in *ApplicationDefinitionList) DeepCopy() *ApplicationDefinitionList {
if in == nil {
return nil
}
out := new(ApplicationDefinitionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ApplicationDefinitionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionRelease) DeepCopyInto(out *ApplicationDefinitionRelease) {
*out = *in
if in.ChartRef != nil {
in, out := &in.ChartRef, &out.ChartRef
*out = new(v2.CrossNamespaceSourceReference)
**out = **in
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionRelease.
func (in *ApplicationDefinitionRelease) DeepCopy() *ApplicationDefinitionRelease {
if in == nil {
return nil
}
out := new(ApplicationDefinitionRelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionResourceSelector) DeepCopyInto(out *ApplicationDefinitionResourceSelector) {
*out = *in
in.LabelSelector.DeepCopyInto(&out.LabelSelector)
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionResourceSelector.
func (in *ApplicationDefinitionResourceSelector) DeepCopy() *ApplicationDefinitionResourceSelector {
if in == nil {
return nil
}
out := new(ApplicationDefinitionResourceSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionResources) DeepCopyInto(out *ApplicationDefinitionResources) {
*out = *in
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]*ApplicationDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ApplicationDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]*ApplicationDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(ApplicationDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionResources.
func (in *ApplicationDefinitionResources) DeepCopy() *ApplicationDefinitionResources {
if in == nil {
return nil
}
out := new(ApplicationDefinitionResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationDefinitionSpec) DeepCopyInto(out *ApplicationDefinitionSpec) {
*out = *in
out.Application = in.Application
in.Release.DeepCopyInto(&out.Release)
in.Secrets.DeepCopyInto(&out.Secrets)
in.Services.DeepCopyInto(&out.Services)
in.Ingresses.DeepCopyInto(&out.Ingresses)
if in.Dashboard != nil {
in, out := &in.Dashboard, &out.Dashboard
*out = new(ApplicationDefinitionDashboard)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationDefinitionSpec.
func (in *ApplicationDefinitionSpec) DeepCopy() *ApplicationDefinitionSpec {
if in == nil {
return nil
}
out := new(ApplicationDefinitionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Component) DeepCopyInto(out *Component) {
*out = *in
@@ -297,6 +77,237 @@ func (in *ComponentInstall) DeepCopy() *ComponentInstall {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinition) DeepCopyInto(out *CozystackResourceDefinition) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinition.
func (in *CozystackResourceDefinition) DeepCopy() *CozystackResourceDefinition {
if in == nil {
return nil
}
out := new(CozystackResourceDefinition)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CozystackResourceDefinition) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionApplication) DeepCopyInto(out *CozystackResourceDefinitionApplication) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionApplication.
func (in *CozystackResourceDefinitionApplication) DeepCopy() *CozystackResourceDefinitionApplication {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionApplication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionChart) DeepCopyInto(out *CozystackResourceDefinitionChart) {
*out = *in
out.SourceRef = in.SourceRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionChart.
func (in *CozystackResourceDefinitionChart) DeepCopy() *CozystackResourceDefinitionChart {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionChart)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionDashboard) DeepCopyInto(out *CozystackResourceDefinitionDashboard) {
*out = *in
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Tabs != nil {
in, out := &in.Tabs, &out.Tabs
*out = make([]DashboardTab, len(*in))
copy(*out, *in)
}
if in.KeysOrder != nil {
in, out := &in.KeysOrder, &out.KeysOrder
*out = make([][]string, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = make([]string, len(*in))
copy(*out, *in)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionDashboard.
func (in *CozystackResourceDefinitionDashboard) DeepCopy() *CozystackResourceDefinitionDashboard {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionDashboard)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionList) DeepCopyInto(out *CozystackResourceDefinitionList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CozystackResourceDefinition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionList.
func (in *CozystackResourceDefinitionList) DeepCopy() *CozystackResourceDefinitionList {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CozystackResourceDefinitionList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionRelease) DeepCopyInto(out *CozystackResourceDefinitionRelease) {
*out = *in
out.Chart = in.Chart
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionRelease.
func (in *CozystackResourceDefinitionRelease) DeepCopy() *CozystackResourceDefinitionRelease {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionRelease)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionResourceSelector) DeepCopyInto(out *CozystackResourceDefinitionResourceSelector) {
*out = *in
in.LabelSelector.DeepCopyInto(&out.LabelSelector)
if in.ResourceNames != nil {
in, out := &in.ResourceNames, &out.ResourceNames
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionResourceSelector.
func (in *CozystackResourceDefinitionResourceSelector) DeepCopy() *CozystackResourceDefinitionResourceSelector {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionResourceSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionResources) DeepCopyInto(out *CozystackResourceDefinitionResources) {
*out = *in
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]*CozystackResourceDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(CozystackResourceDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]*CozystackResourceDefinitionResourceSelector, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(CozystackResourceDefinitionResourceSelector)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionResources.
func (in *CozystackResourceDefinitionResources) DeepCopy() *CozystackResourceDefinitionResources {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CozystackResourceDefinitionSpec) DeepCopyInto(out *CozystackResourceDefinitionSpec) {
*out = *in
out.Application = in.Application
in.Release.DeepCopyInto(&out.Release)
in.Secrets.DeepCopyInto(&out.Secrets)
in.Services.DeepCopyInto(&out.Services)
in.Ingresses.DeepCopyInto(&out.Ingresses)
if in.Dashboard != nil {
in, out := &in.Dashboard, &out.Dashboard
*out = new(CozystackResourceDefinitionDashboard)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CozystackResourceDefinitionSpec.
func (in *CozystackResourceDefinitionSpec) DeepCopy() *CozystackResourceDefinitionSpec {
if in == nil {
return nil
}
out := new(CozystackResourceDefinitionSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DependencyStatus) DeepCopyInto(out *DependencyStatus) {
*out = *in
@@ -611,6 +622,21 @@ func (in Selector) DeepCopy() Selector {
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SourceRef) DeepCopyInto(out *SourceRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRef.
func (in *SourceRef) DeepCopy() *SourceRef {
if in == nil {
return nil
}
out := new(SourceRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Variant) DeepCopyInto(out *Variant) {
*out = *in

View File

@@ -29,16 +29,16 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
"github.com/cozystack/cozystack/internal/backupcontroller"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
// +kubebuilder:scaffold:imports
)
@@ -51,6 +51,8 @@ func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(backupsv1alpha1.AddToScheme(scheme))
utilruntime.Must(strategyv1alpha1.AddToScheme(scheme))
utilruntime.Must(velerov1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
@@ -132,11 +134,6 @@ func main() {
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "core.backups.cozystack.io",
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&backupsv1alpha1.BackupClass{}: {},
},
},
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
@@ -162,6 +159,15 @@ func main() {
os.Exit(1)
}
if err = (&backupcontroller.BackupJobReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("backup-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "BackupJob")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {

View File

@@ -29,18 +29,14 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
"github.com/cozystack/cozystack/internal/backupcontroller"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
// +kubebuilder:scaffold:imports
)
@@ -53,8 +49,6 @@ func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(backupsv1alpha1.AddToScheme(scheme))
utilruntime.Must(strategyv1alpha1.AddToScheme(scheme))
utilruntime.Must(velerov1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
@@ -136,11 +130,6 @@ func main() {
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "strategy.backups.cozystack.io",
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&backupsv1alpha1.BackupClass{}: {},
},
},
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
@@ -159,20 +148,10 @@ func main() {
}
if err = (&backupcontroller.BackupJobReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("backup-controller"),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "BackupJob")
os.Exit(1)
}
if err = (&backupcontroller.RestoreJobReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("restore-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "RestoreJob")
setupLog.Error(err, "unable to create controller", "controller", "Job")
os.Exit(1)
}

View File

@@ -23,9 +23,6 @@ import (
"github.com/spf13/cobra"
)
// Version is set at build time via -ldflags.
var Version = "dev"
// rootCmd represents the base command when called without any subcommands.
var rootCmd = &cobra.Command{
Use: "cozypkg",
@@ -47,6 +44,6 @@ func Execute() error {
}
func init() {
rootCmd.Version = Version
// Commands are registered in their respective init() functions
}

View File

@@ -0,0 +1,29 @@
package main
import (
"flag"
"log"
"net/http"
"path/filepath"
)
func main() {
addr := flag.String("address", ":8123", "Address to listen on")
dir := flag.String("dir", "/cozystack/assets", "Directory to serve files from")
flag.Parse()
// Resolve the served directory to an absolute path so the log output is unambiguous.
absDir, err := filepath.Abs(*dir)
if err != nil {
log.Fatalf("Error getting absolute path for %s: %v", *dir, err)
}
// Serve the directory contents over HTTP at the server root.
fs := http.FileServer(http.Dir(absDir))
http.Handle("/", fs)
log.Printf("Server starting on %s, serving directory %s", *addr, absDir)
if err := http.ListenAndServe(*addr, nil); err != nil {
log.Fatalf("Server failed to start: %v", err)
}
}

View File

@@ -68,6 +68,8 @@ func main() {
var disableTelemetry bool
var telemetryEndpoint string
var telemetryInterval string
var cozystackVersion string
var reconcileDeployment bool
var tlsOpts []func(*tls.Config)
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
@@ -85,6 +87,10 @@ func main() {
"Endpoint for sending telemetry data")
flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
"Interval between telemetry data collection (e.g. 15m, 1h)")
flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
"Version of Cozystack")
flag.BoolVar(&reconcileDeployment, "reconcile-deployment", false,
"If set, the Cozystack API server is assumed to run as a Deployment, else as a DaemonSet.")
opts := zap.Options{
Development: false,
}
@@ -100,9 +106,10 @@ func main() {
// Configure telemetry
telemetryConfig := telemetry.Config{
Disabled: disableTelemetry,
Endpoint: telemetryEndpoint,
Interval: interval,
Disabled: disableTelemetry,
Endpoint: telemetryEndpoint,
Interval: interval,
CozystackVersion: cozystackVersion,
}
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
@@ -193,19 +200,40 @@ func main() {
os.Exit(1)
}
if err = (&controller.ApplicationDefinitionReconciler{
if err = (&controller.TenantHelmReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ApplicationDefinitionReconciler")
setupLog.Error(err, "unable to create controller", "controller", "TenantHelmReconciler")
os.Exit(1)
}
if err = (&controller.ApplicationDefinitionHelmReconciler{
if err = (&controller.CozystackConfigReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ApplicationDefinitionHelmReconciler")
setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
os.Exit(1)
}
cozyAPIKind := "DaemonSet"
if reconcileDeployment {
cozyAPIKind = "Deployment"
}
if err = (&controller.CozystackResourceDefinitionReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
CozystackAPIKind: cozyAPIKind,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozystackResourceDefinitionReconciler")
os.Exit(1)
}
if err = (&controller.CozystackResourceDefinitionHelmReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozystackResourceDefinitionHelmReconciler")
os.Exit(1)
}

View File

@@ -32,16 +32,12 @@ import (
helmv2 "github.com/fluxcd/helm-controller/api/v2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
sourcewatcherv1beta1 "github.com/fluxcd/source-watcher/api/v2/v1beta1"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -49,11 +45,8 @@ import (
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"github.com/cozystack/cozystack/internal/cozyvaluesreplicator"
"github.com/cozystack/cozystack/internal/crdinstall"
"github.com/cozystack/cozystack/internal/fluxinstall"
"github.com/cozystack/cozystack/internal/operator"
"github.com/cozystack/cozystack/internal/telemetry"
// +kubebuilder:scaffold:imports
)
@@ -78,14 +71,8 @@ func main() {
var probeAddr string
var secureMetrics bool
var enableHTTP2 bool
var installCRDs bool
var installFlux bool
var disableTelemetry bool
var telemetryEndpoint string
var telemetryInterval string
var cozyValuesSecretName string
var cozyValuesSecretNamespace string
var cozyValuesNamespaceSelector string
var cozystackVersion string
var platformSourceURL string
var platformSourceName string
var platformSourceRef string
@@ -99,20 +86,12 @@ func main() {
"If set the metrics endpoint is served securely")
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
flag.BoolVar(&installCRDs, "install-crds", false, "Install Cozystack CRDs before starting reconcile loop")
flag.BoolVar(&installFlux, "install-flux", false, "Install Flux components before starting reconcile loop")
flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
"Disable telemetry collection")
flag.StringVar(&telemetryEndpoint, "telemetry-endpoint", "https://telemetry.cozystack.io",
"Endpoint for sending telemetry data")
flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
"Interval between telemetry data collection (e.g. 15m, 1h)")
flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
"Version of Cozystack")
flag.StringVar(&platformSourceURL, "platform-source-url", "", "Platform source URL (oci:// or https://). If specified, generates OCIRepository or GitRepository resource.")
flag.StringVar(&platformSourceName, "platform-source-name", "cozystack-packages", "Name for the generated platform source resource (default: cozystack-packages)")
flag.StringVar(&platformSourceRef, "platform-source-ref", "", "Reference specification as key=value pairs (e.g., 'branch=main' or 'digest=sha256:...,tag=v1.0'). For OCI: digest, semver, semverFilter, tag. For Git: branch, tag, semver, name, commit.")
flag.StringVar(&cozyValuesSecretName, "cozy-values-secret-name", "cozystack-values", "The name of the secret containing cluster-wide configuration values.")
flag.StringVar(&cozyValuesSecretNamespace, "cozy-values-secret-namespace", "cozy-system", "The namespace of the secret containing cluster-wide configuration values.")
flag.StringVar(&cozyValuesNamespaceSelector, "cozy-values-namespace-selector", "cozystack.io/system=true", "The label selector for namespaces where the cluster-wide configuration values must be replicated.")
opts := zap.Options{
Development: true,
@@ -131,28 +110,10 @@ func main() {
os.Exit(1)
}
targetNSSelector, err := labels.Parse(cozyValuesNamespaceSelector)
if err != nil {
setupLog.Error(err, "could not parse namespace label selector")
os.Exit(1)
}
// Initialize the controller manager
// Start the controller manager
setupLog.Info("Starting controller manager")
mgr, err := ctrl.NewManager(config, ctrl.Options{
Scheme: scheme,
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
// Cache only Secrets named <secretName> (in any namespace)
&corev1.Secret{}: {
Field: fields.OneTermEqualSelector("metadata.name", cozyValuesSecretName),
},
// Cache only Namespaces that match a label selector
&corev1.Namespace{}: {
Label: targetNSSelector,
},
},
},
Metrics: metricsserver.Options{
BindAddress: metricsAddr,
SecureServing: secureMetrics,
@@ -179,26 +140,10 @@ func main() {
os.Exit(1)
}
// Set up signal handler early so install phases respect SIGTERM
mgrCtx := ctrl.SetupSignalHandler()
// Install Cozystack CRDs before starting reconcile loop
if installCRDs {
setupLog.Info("Installing Cozystack CRDs before starting reconcile loop")
installCtx, installCancel := context.WithTimeout(mgrCtx, 2*time.Minute)
defer installCancel()
if err := crdinstall.Install(installCtx, directClient, crdinstall.WriteEmbeddedManifests); err != nil {
setupLog.Error(err, "failed to install CRDs")
os.Exit(1)
}
setupLog.Info("CRD installation completed successfully")
}
// Install Flux before starting reconcile loop
if installFlux {
setupLog.Info("Installing Flux components before starting reconcile loop")
installCtx, installCancel := context.WithTimeout(mgrCtx, 5*time.Minute)
installCtx, installCancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer installCancel()
// Use direct client for pre-start operations (cache is not ready yet)
@@ -212,7 +157,7 @@ func main() {
// Generate and install platform source resource if specified
if platformSourceURL != "" {
setupLog.Info("Generating platform source resource", "url", platformSourceURL, "name", platformSourceName, "ref", platformSourceRef)
installCtx, installCancel := context.WithTimeout(mgrCtx, 2*time.Minute)
installCtx, installCancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer installCancel()
// Use direct client for pre-start operations (cache is not ready yet)
@@ -242,18 +187,6 @@ func main() {
os.Exit(1)
}
// Setup CozyValuesReplicator reconciler
if err := (&cozyvaluesreplicator.SecretReplicatorReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
SourceNamespace: cozyValuesSecretNamespace,
SecretName: cozyValuesSecretName,
TargetNamespaceSelector: targetNSSelector,
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CozyValuesReplicator")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
@@ -265,35 +198,8 @@ func main() {
os.Exit(1)
}
// Parse telemetry interval
interval, err := time.ParseDuration(telemetryInterval)
if err != nil {
setupLog.Error(err, "invalid telemetry interval")
os.Exit(1)
}
// Configure telemetry
telemetryConfig := telemetry.Config{
Disabled: disableTelemetry,
Endpoint: telemetryEndpoint,
Interval: interval,
}
// Initialize telemetry collector
// Use APIReader (non-cached) because the manager's cache is filtered
// and doesn't include resources needed for telemetry (e.g., kube-system namespace, nodes, etc.)
collector, err := telemetry.NewOperatorCollector(mgr.GetAPIReader(), &telemetryConfig, config)
if err != nil {
setupLog.V(1).Info("unable to create telemetry collector, telemetry will be disabled", "error", err)
}
if collector != nil {
if err := mgr.Add(collector); err != nil {
setupLog.V(1).Info("unable to set up telemetry collector, continuing without telemetry", "error", err)
}
}
setupLog.Info("Starting controller manager")
mgrCtx := ctrl.SetupSignalHandler()
if err := mgr.Start(mgrCtx); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)

View File

@@ -1,151 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"flag"
"os"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"github.com/cozystack/cozystack/internal/controller/fluxplunger"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(helmv2.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var secureMetrics bool
var enableHTTP2 bool
var tlsOpts []func(*tls.Config)
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&secureMetrics, "metrics-secure", true,
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics server")
opts := zap.Options{
Development: false,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
// if the enable-http2 flag is false (the default), http/2 should be disabled
// due to its vulnerabilities. More specifically, disabling http/2 will
// prevent from being vulnerable to the HTTP/2 Stream Cancellation and
// Rapid Reset CVEs. For more information see:
// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
// - https://github.com/advisories/GHSA-4374-p667-p6c8
disableHTTP2 := func(c *tls.Config) {
setupLog.Info("disabling http/2")
c.NextProtos = []string{"http/1.1"}
}
if !enableHTTP2 {
tlsOpts = append(tlsOpts, disableHTTP2)
}
// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
// More info:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
// - https://book.kubebuilder.io/reference/metrics.html
metricsServerOptions := metricsserver.Options{
BindAddress: metricsAddr,
SecureServing: secureMetrics,
TLSOpts: tlsOpts,
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Metrics: metricsServerOptions,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "flux-plunger.cozystack.io",
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
// speeds up voluntary leader transitions as the new leader don't have to wait
// LeaseDuration time first.
//
// In the default scaffold provided, the program ends immediately after
// the manager stops, so would be fine to enable this option. However,
// if you are doing or is intended to do any operation such as perform cleanups
// after the manager stops then its usage might be unsafe.
// LeaderElectionReleaseOnCancel: true,
})
if err != nil {
setupLog.Error(err, "unable to create manager")
os.Exit(1)
}
if err = (&fluxplunger.FluxPlunger{
Client: mgr.GetClient(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "FluxPlunger")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

View File

@@ -1,602 +0,0 @@
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__elements": {},
"__requires": [
{
"type": "panel",
"id": "bargauge",
"name": "Bar gauge",
"version": ""
},
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "9.4.7"
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
},
{
"type": "panel",
"id": "timeseries",
"name": "Time series",
"version": ""
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "datasource",
"uid": "grafana"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 16612,
"graphTooltip": 0,
"id": null,
"links": [
{
"asDropdown": true,
"icon": "external link",
"includeVars": true,
"keepTime": true,
"tags": [
"cilium-overview"
],
"targetBlank": false,
"title": "Cilium Overviews",
"tooltip": "",
"type": "dashboards",
"url": ""
},
{
"asDropdown": true,
"icon": "external link",
"includeVars": false,
"keepTime": true,
"tags": [
"hubble"
],
"targetBlank": false,
"title": "Hubble",
"tooltip": "",
"type": "dashboards",
"url": ""
}
],
"liveNow": false,
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 2,
"panels": [],
"title": "DNS",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 1
},
"id": 37,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "sum(rate(hubble_dns_queries_total{cluster=~\"$cluster\", source_namespace=~\"$source_namespace\", destination_namespace=~\"$destination_namespace\"}[$__rate_interval])) by (source) > 0",
"legendFormat": "{{source}}",
"range": true,
"refId": "A"
}
],
"title": "DNS queries",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 1
},
"id": 41,
"options": {
"displayMode": "gradient",
"minVizHeight": 10,
"minVizWidth": 0,
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showUnfilled": true
},
"pluginVersion": "9.4.7",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "topk(10, sum(rate(hubble_dns_queries_total{cluster=~\"$cluster\", source_namespace=~\"$source_namespace\", destination_namespace=~\"$destination_namespace\"}[$__rate_interval])*60) by (query))",
"legendFormat": "{{query}}",
"range": true,
"refId": "A"
}
],
"title": "Top 10 DNS queries",
"type": "bargauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 10
},
"id": 39,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "round(sum(rate(hubble_dns_queries_total{cluster=~\"$cluster\", source_namespace=~\"$source_namespace\", destination_namespace=~\"$destination_namespace\"}[$__rate_interval])) by (source) - sum(label_replace(sum(rate(hubble_dns_responses_total{cluster=~\"$cluster\", source_namespace=~\"$destination_namespace\", destination_namespace=~\"$source_namespace\"}[$__rate_interval])) by (destination), \"source\", \"$1\", \"destination\", \"(.*)\")) without (destination), 0.001) > 0",
"legendFormat": "{{source}}",
"range": true,
"refId": "A"
}
],
"title": "Missing DNS responses",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 10
},
"id": 43,
"options": {
"legend": {
"calcs": [
"mean",
"lastNotNull"
],
"displayMode": "table",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "code",
"expr": "sum(rate(hubble_dns_responses_total{cluster=~\"$cluster\", source_namespace=~\"$destination_namespace\", destination_namespace=~\"$source_namespace\", rcode!=\"No Error\"}[$__rate_interval])) by (destination, rcode) > 0",
"legendFormat": "{{destination}}: {{rcode}}",
"range": true,
"refId": "A"
}
],
"title": "DNS errors",
"type": "timeseries"
}
],
"refresh": "",
"revision": 1,
"schemaVersion": 38,
"style": "dark",
"tags": [
"kubecon-demo"
],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "default",
"value": "default"
},
"hide": 0,
"includeAll": false,
"label": "Data Source",
"multi": false,
"name": "DS_PROMETHEUS",
"options": [],
"query": "prometheus",
"queryValue": "",
"refresh": 1,
"regex": "(?!grafanacloud-usage|grafanacloud-ml-metrics).+",
"skipUrlSync": false,
"type": "datasource"
},
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"definition": "label_values(cilium_version, cluster)",
"hide": 0,
"includeAll": true,
"multi": true,
"name": "cluster",
"options": [],
"query": {
"query": "label_values(cilium_version, cluster)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"definition": "label_values(source_namespace)",
"hide": 0,
"includeAll": true,
"label": "Source Namespace",
"multi": true,
"name": "source_namespace",
"options": [],
"query": {
"query": "label_values(source_namespace)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
},
{
"allValue": ".*",
"current": {},
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"definition": "label_values(destination_namespace)",
"hide": 0,
"includeAll": true,
"label": "Destination Namespace",
"multi": true,
"name": "destination_namespace",
"options": [],
"query": {
"query": "label_values(destination_namespace)",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "Hubble / DNS Overview (Namespace)",
"uid": "_f0DUpY4k",
"version": 26,
"weekStart": ""
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -27,7 +27,7 @@ git commit --signoff -m "[component] Brief description of changes"
**Component prefixes:**
- System: `[dashboard]`, `[platform]`, `[cilium]`, `[kube-ovn]`, `[linstor]`, `[fluxcd]`, `[cluster-api]`
- Apps: `[postgres]`, `[mariadb]`, `[redis]`, `[kafka]`, `[clickhouse]`, `[virtual-machine]`, `[kubernetes]`
- Apps: `[postgres]`, `[mysql]`, `[redis]`, `[kafka]`, `[clickhouse]`, `[virtual-machine]`, `[kubernetes]`
- Other: `[tests]`, `[ci]`, `[docs]`, `[maintenance]`
**Examples:**

View File

@@ -1,16 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.37.10
-->
## Features and Improvements
* **[virtual-machine] Improve check for resizing job**: Improved storage resize logic to only expand persistent volume claims when storage is being increased, preventing unintended storage reduction operations. Added validation to accurately compare current and desired storage sizes before triggering resize operations ([**@kvaps**](https://github.com/kvaps) in #1688, #1702).
## Fixes
* **[dashboard] Fix CustomFormsOverride schema to nest properties under spec.properties**: Fixed the logic for generating CustomFormsOverride schema to properly nest properties under `spec.properties` instead of directly under `properties`, ensuring correct form schema generation in the dashboard ([**@kvaps**](https://github.com/kvaps) in #1692, #1699).
---
**Full Changelog**: [v0.37.9...v0.37.10](https://github.com/cozystack/cozystack/compare/v0.37.9...v0.37.10)

View File

@@ -1,18 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.5
-->
## Features and Improvements
* **[virtual-machine,vm-instance] Add nodeAffinity for Windows VMs based on scheduling config**: Added nodeAffinity configuration to virtual-machine and vm-instance charts to support dedicated nodes for Windows VMs. When `dedicatedNodesForWindowsVMs` is enabled in the `cozystack-scheduling` ConfigMap, Windows VMs are scheduled on nodes with label `scheduling.cozystack.io/vm-windows=true`, while non-Windows VMs prefer nodes without this label ([**@kvaps**](https://github.com/kvaps) in #1693, #1744).
* **[cilium] Enable automatic pod rollout on configmap updates**: Cilium and Cilium operator pods now automatically restart when the cilium-config ConfigMap is updated, ensuring configuration changes are applied immediately without manual intervention ([**@kvaps**](https://github.com/kvaps) in #1728, #1745).
* **Update SeaweedFS to v4.02**: Updated SeaweedFS to version 4.02 with improved S3 daemon performance and fixes. This update includes better S3 compatibility and performance improvements ([**@kvaps**](https://github.com/kvaps) in #1725, #1732).
## Fixes
* **[apps] Refactor apiserver to use typed objects and fix UnstructuredList GVK**: Refactored the apiserver REST handlers to use typed objects (`appsv1alpha1.Application`) instead of `unstructured.Unstructured`, eliminating the need for runtime conversions and simplifying the codebase. Additionally, fixed an issue where `UnstructuredList` objects were using the first registered kind from `typeToGVK` instead of the kind from the object's own `Kind` field when multiple kinds are registered with the same Go type. This fix includes the upstream fix from kubernetes/kubernetes#135537 ([**@kvaps**](https://github.com/kvaps) in #1679, #1709).
---
**Full Changelog**: [v0.38.4...v0.38.5](https://github.com/cozystack/cozystack/compare/v0.38.4...v0.38.5)

View File

@@ -1,12 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.6
-->
## Development, Testing, and CI/CD
* **[kubernetes] Add lb tests for tenant k8s**: Added load balancer tests for tenant Kubernetes clusters, improving test coverage and ensuring proper load balancer functionality in tenant environments ([**@IvanHunters**](https://github.com/IvanHunters) in #1783, #1792).
---
**Full Changelog**: [v0.38.5...v0.38.6](https://github.com/cozystack/cozystack/compare/v0.38.5...v0.38.6)

View File

@@ -1,13 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.7
-->
## Fixes
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring for virtual machines that are not running for extended periods ([**@lexfrei**](https://github.com/lexfrei) in #1770).
* **[kubevirt-operator] Revert incorrect case change in VM alerts**: Reverted incorrect case change in VM alert names to maintain consistency with alert naming conventions ([**@lexfrei**](https://github.com/lexfrei) in #1804, #1805).
---
**Full Changelog**: [v0.38.6...v0.38.7](https://github.com/cozystack/cozystack/compare/v0.38.6...v0.38.7)

View File

@@ -1,12 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.38.8
-->
## Improvements
* **[multus] Remove memory limit**: Removed memory limit for Multus daemonset due to unpredictable memory consumption spikes during startup after node reboots (reported up to 3Gi). This temporary change prevents out-of-memory issues while the root cause is addressed in future releases ([**@nbykov0**](https://github.com/nbykov0) in #1834).
---
**Full Changelog**: [v0.38.7...v0.38.8](https://github.com/cozystack/cozystack/compare/v0.38.7...v0.38.8)

View File

@@ -1,19 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.2
-->
## Features and Improvements
* **[vm] Always expose VMs with a service**: Virtual machines are now always exposed with at least a ClusterIP service, ensuring they have in-cluster DNS names and can be accessed from other pods even without public IP addresses ([**@lllamnyp**](https://github.com/lllamnyp) in #1738, #1751).
* **[tenant] Allow egress to parent ingress pods**: Updated tenant network policies to allow egress traffic to parent cluster ingress pods, enabling proper communication patterns between tenant namespaces and parent cluster ingress controllers ([**@lexfrei**](https://github.com/lexfrei) in #1765, #1776).
* **[system] Add resource requests and limits to etcd-defrag**: Added resource requests and limits to etcd-defrag job to ensure proper resource allocation and prevent resource contention during etcd maintenance operations ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1785, #1786).
* **[tenant] Run cleanup job from system namespace**: Moved tenant cleanup job to run from system namespace, improving security and resource isolation for tenant cleanup operations ([**@lllamnyp**](https://github.com/lllamnyp) in #1774, #1777).
## Fixes
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring for virtual machines that are not running for extended periods ([**@lexfrei**](https://github.com/lexfrei) in #1770, #1775).
---
**Full Changelog**: [v0.39.1...v0.39.2](https://github.com/cozystack/cozystack/compare/v0.39.1...v0.39.2)

View File

@@ -1,36 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.3
-->
## Features and Improvements
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities, new admin component with web-based UI, worker component for distributed operations, and enhanced S3 monitoring with Grafana dashboards. Improves S3 service performance by routing requests to nearest available volume servers ([**@nbykov0**](https://github.com/nbykov0) in #1748, #1830).
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 with improved stability and new features ([**@kvaps**](https://github.com/kvaps) in #1819, #1837).
* **[linstor] Build linstor-server with custom patches**: Added custom patches to linstor-server build process, enabling platform-specific optimizations and fixes ([**@kvaps**](https://github.com/kvaps) in #1726, #1818).
* **[api, lineage] Tolerate all taints**: Updated API and lineage webhook to tolerate all taints, ensuring controllers can run on any node regardless of taint configuration ([**@nbykov0**](https://github.com/nbykov0) in #1781, #1827).
* **[ingress] Add topology anti-affinities**: Added topology anti-affinity rules to ingress controller deployment for better pod distribution across nodes ([**@kvaps**](https://github.com/kvaps) in commit 25f31022).
## Fixes
* **[linstor] fix: prevent DRBD device race condition in updateDiscGran**: Fixed race condition in DRBD device management during granularity updates, preventing potential data corruption or device conflicts ([**@kvaps**](https://github.com/kvaps) in #1829, #1836).
* **fix(linstor): prevent orphaned DRBD devices during toggle-disk retry**: Fixed issue where retry logic during disk toggle operations could leave orphaned DRBD devices, now properly cleans up devices during retry attempts ([**@kvaps**](https://github.com/kvaps) in #1823, #1825).
* **[kubernetes] Fix endpoints for cilium-gateway**: Fixed endpoint configuration for cilium-gateway, ensuring proper service discovery and connectivity ([**@kvaps**](https://github.com/kvaps) in #1729, #1808).
* **[kubevirt-operator] Revert incorrect case change in VM alerts**: Reverted incorrect case change in VM alert names to maintain consistency with alert naming conventions ([**@lexfrei**](https://github.com/lexfrei) in #1804, #1806).
## System Configuration
* **[kubeovn] Package from external repo**: Extracted Kube-OVN packaging from main repository to external repository, improving modularity ([**@lllamnyp**](https://github.com/lllamnyp) in #1535).
## Development, Testing, and CI/CD
* **[testing] Add aliases and autocomplete**: Added shell aliases and autocomplete support for testing commands, improving developer experience ([**@lllamnyp**](https://github.com/lllamnyp) in #1803, #1809).
## Dependencies
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities ([**@nbykov0**](https://github.com/nbykov0) in #1748, #1830).
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 ([**@kvaps**](https://github.com/kvaps) in #1819, #1837).
---
**Full Changelog**: [v0.39.2...v0.39.3](https://github.com/cozystack/cozystack/compare/v0.39.2...v0.39.3)

View File

@@ -1,12 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.4
-->
## Features and Improvements
* **[paas-full] Add multus dependencies similar to other CNIs**: Added Multus as a dependency in the paas-full package, consistent with how other CNIs are included. This ensures proper dependency management and simplifies the installation process for environments using Multus networking ([**@nbykov0**](https://github.com/nbykov0) in #1835).
---
**Full Changelog**: [v0.39.3...v0.39.4](https://github.com/cozystack/cozystack/compare/v0.39.3...v0.39.4)

View File

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.39.5
-->
## Fixes
* **[linstor] Update piraeus-server patches with critical fixes**: Backported critical patches to piraeus-server that address storage stability issues and improve DRBD resource handling. These patches fix edge cases in device management and ensure more reliable storage operations ([**@kvaps**](https://github.com/kvaps) in #1850, #1853).
---
**Full Changelog**: [v0.39.4...v0.39.5](https://github.com/cozystack/cozystack/compare/v0.39.4...v0.39.5)

View File

@@ -1,206 +0,0 @@
# Cozystack v0.40 — "Enhanced Storage & Platform Architecture"
This release introduces a LINSTOR scheduler for optimal pod placement, SeaweedFS traffic locality, a new valuesFrom-based configuration mechanism, auto-diskful support for LINSTOR, automated version management systems, and numerous improvements across the platform.
## Feature Highlights
### LINSTOR Scheduler for Optimal Pod Placement
Cozystack now includes a custom Kubernetes scheduler extender that works alongside the default kube-scheduler to optimize pod placement on nodes with LINSTOR storage. When a pod requests LINSTOR-backed storage, the scheduler communicates with the LINSTOR controller to find nodes that have local replicas of the requested volumes, prioritizing placement on nodes with existing data to minimize network traffic and improve I/O performance.
The scheduler includes an admission webhook that automatically routes pods using LINSTOR CSI volumes to the custom scheduler, ensuring seamless integration without manual configuration. This feature significantly improves performance for workloads using LINSTOR storage by reducing network latency and improving data locality.
Learn more about LINSTOR in the [documentation](https://cozystack.io/docs/operations/storage/linstor/).
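For illustration, a pod using LINSTOR-backed storage ends up bound to the dedicated scheduler. The manifest below is a minimal sketch, assuming the scheduler is exposed under the name `linstor-scheduler` and that `pg-data` is an existing PVC provisioned by the LINSTOR CSI driver; in practice the admission webhook injects `schedulerName` automatically, so you would not set it by hand.
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-db
spec:
  schedulerName: linstor-scheduler   # assumed name; normally injected by the webhook
  containers:
    - name: app
      image: postgres:16
      volumeMounts:
        - name: data
          mountPath: /var/lib/postgresql/data
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: pg-data           # hypothetical PVC backed by the LINSTOR CSI driver
```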
### SeaweedFS Traffic Locality
SeaweedFS has been upgraded to version 4.05 with new traffic locality capabilities that optimize S3 service traffic distribution. The update includes a new admin component with a web-based UI and authentication support, as well as a worker component for distributed operations. These enhancements improve S3 service performance and provide better visibility through enhanced Grafana dashboard panels for buckets, API calls, costs, and performance metrics.
The traffic locality feature ensures that S3 requests are routed to the nearest available volume servers, reducing latency and improving overall performance for distributed storage operations. TLS certificate support for admin and worker components adds an extra layer of security for management operations.
### ValuesFrom Configuration Mechanism
Cozystack now uses FluxCD's valuesFrom mechanism in place of Helm lookup functions for configuration propagation. This architectural improvement provides cleaner configuration propagation and eliminates the need for force-reconcile controllers. Configuration from ConfigMaps (cozystack, cozystack-branding, cozystack-scheduling) and namespace service references (etcd, host, ingress, monitoring, seaweedfs) is now centrally managed through a `cozystack-values` Secret in each namespace.
This change simplifies Helm chart templates by replacing complex lookup functions with direct value references, improves configuration consistency, and reduces reconciliation overhead. All HelmReleases now automatically receive cluster and namespace configuration through the valuesFrom mechanism, making configuration management more transparent and maintainable.
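A minimal sketch of the resulting pattern, using standard FluxCD `HelmRelease` fields; the release name, chart, and source below are illustrative, and the exact key layout inside the `cozystack-values` Secret is defined by the platform:
```yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: example-app            # illustrative release
  namespace: tenant-example
spec:
  interval: 10m
  chart:
    spec:
      chart: example
      sourceRef:
        kind: OCIRepository
        name: cozystack-packages
  valuesFrom:
    - kind: Secret
      name: cozystack-values   # replicated into each namespace by the platform
```
Because the Secret is referenced rather than looked up at template-render time, a change to the cluster-wide values flows to every release through normal FluxCD reconciliation, with no forced re-render.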
### Auto-diskful for LINSTOR
The LINSTOR integration now includes automatic diskful functionality that converts diskless nodes to diskful when they hold DRBD resources in Primary state for an extended period (30 minutes). This feature addresses scenarios where workloads are scheduled on nodes without local storage replicas by automatically creating local disk replicas when needed, improving I/O performance for long-running workloads.
When enabled with cleanup options, the system can automatically remove disk replicas that are no longer needed, preventing storage waste from temporary replicas. This intelligent storage management reduces network traffic for frequently accessed data while maintaining efficient storage utilization.
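For reference, this behavior maps onto two LINSTOR properties: `DrbdOptions/auto-diskful` (the Primary-state threshold, in minutes) and `DrbdOptions/auto-diskful-allow-cleanup`. The snippet below is a hypothetical sketch of setting them through a Piraeus-operator-style resource; the CRD shape and the exact place Cozystack sets these values are assumptions.
```yaml
apiVersion: piraeus.io/v1
kind: LinstorCluster             # hypothetical placement; Cozystack may set this elsewhere
metadata:
  name: linstorcluster
spec:
  properties:
    - name: DrbdOptions/auto-diskful                # minutes a diskless node may stay Primary
      value: "30"
    - name: DrbdOptions/auto-diskful-allow-cleanup  # remove surplus replicas afterwards
      value: "true"
```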
### Automated Version Management Systems
Cozystack now includes automated version management systems for PostgreSQL, Kubernetes, MariaDB, and Redis applications. These systems automatically track upstream versions and provide mechanisms for automated version updates, ensuring that platform users always have access to the latest stable versions while maintaining compatibility with existing deployments.
The version management systems integrate with the Cozystack API and dashboard, providing administrators with visibility into available versions and update paths. This infrastructure sets the foundation for future automated upgrade workflows and version compatibility management.
---
## Major Features and Improvements
### Storage
* **[linstor] Add linstor-scheduler package**: Added LINSTOR scheduler extender for optimal pod placement on nodes with LINSTOR storage. Includes admission webhook that automatically routes pods using LINSTOR CSI volumes to the custom scheduler, ensuring pods are placed on nodes with local replicas to minimize network traffic and improve I/O performance ([**@kvaps**](https://github.com/kvaps) in #1824).
* **[linstor] Enable auto-diskful for diskless nodes**: Enabled DRBD auto-diskful functionality to automatically convert diskless nodes to diskful when they hold volumes in Primary state for more than 30 minutes. Improves I/O performance for long-running workloads by creating local replicas and includes automatic cleanup options to prevent storage waste ([**@kvaps**](https://github.com/kvaps) in #1826).
* **[linstor] Build linstor-server with custom patches**: Added custom patches to linstor-server build process, enabling platform-specific optimizations and fixes ([**@kvaps**](https://github.com/kvaps) in #1726).
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities, new admin component with web-based UI, worker component for distributed operations, and enhanced S3 monitoring with Grafana dashboards. Improves S3 service performance by routing requests to nearest available volume servers ([**@nbykov0**](https://github.com/nbykov0) in #1748).
* **[linstor] fix: prevent DRBD device race condition in updateDiscGran**: Fixed race condition in DRBD device management during granularity updates, preventing potential data corruption or device conflicts ([**@kvaps**](https://github.com/kvaps) in #1829).
* **fix(linstor): prevent orphaned DRBD devices during toggle-disk retry**: Fixed issue where retry logic during disk toggle operations could leave orphaned DRBD devices, now properly cleans up devices during retry attempts ([**@kvaps**](https://github.com/kvaps) in #1823).
### Platform Architecture
* **[platform] Replace Helm lookup with valuesFrom mechanism**: Replaced Helm lookup functions with FluxCD valuesFrom mechanism for configuration propagation. Configuration from ConfigMaps and namespace references is now managed through `cozystack-values` Secret, simplifying templates and eliminating force reconcile controllers ([**@kvaps**](https://github.com/kvaps) in #1787).
* **[platform] refactor: split cozystack-resource-definitions into separate packages**: Refactored cozystack-resource-definitions into separate packages for better organization and maintainability, improving code structure and reducing coupling between components ([**@kvaps**](https://github.com/kvaps) in #1778).
* **[platform] Separate assets server into dedicated deployment**: Separated assets server from main platform deployment, improving scalability and allowing independent scaling of asset delivery infrastructure ([**@kvaps**](https://github.com/kvaps) in #1705).
* **[core] Extract Talos package from installer**: Extracted Talos package configuration from installer into a separate package, improving modularity and enabling independent updates ([**@kvaps**](https://github.com/kvaps) in #1724).
* **[registry] Add application labels and update filtering mechanism**: Added application labels to registry resources and improved filtering mechanism for better resource discovery and organization ([**@kvaps**](https://github.com/kvaps) in #1707).
* **fix(registry): implement field selector filtering for label-based resources**: Implemented field selector filtering for label-based resources in the registry, improving query performance and resource lookup efficiency ([**@kvaps**](https://github.com/kvaps) in #1845).
* **[platform] Add alphabetical sorting to registry resource lists**: Added alphabetical sorting to registry resource lists in the API and dashboard, improving user experience when browsing available applications ([**@lexfrei**](https://github.com/lexfrei) in #1764).
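To make the valuesFrom entry above concrete, here is a minimal sketch of a FluxCD HelmRelease consuming the `cozystack-values` Secret; the release name and chart reference are illustrative placeholders, not actual platform manifests.

```yaml
# Minimal sketch: a HelmRelease pulling configuration from the
# cozystack-values Secret via FluxCD's valuesFrom.
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: example-app          # placeholder release name
  namespace: cozy-system
spec:
  interval: 5m
  chart:
    spec:
      chart: example-app     # placeholder chart
      sourceRef:
        kind: HelmRepository
        name: cozystack
  valuesFrom:
    - kind: Secret
      name: cozystack-values # platform-wide configuration source
```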
### Version Management
* **[postgres] Add version management system with automated version updates**: Introduced version management system for PostgreSQL with automated version tracking and update mechanisms ([**@kvaps**](https://github.com/kvaps) in #1671).
* **[kubernetes] Add version management system with automated version updates**: Added version management system for Kubernetes tenant clusters with automated version tracking and update capabilities ([**@kvaps**](https://github.com/kvaps) in #1672).
* **[mariadb] Add version management system with automated version updates**: Implemented version management system for MariaDB with automated version tracking and update mechanisms ([**@kvaps**](https://github.com/kvaps) in #1680).
* **[redis] Add version management system with automated version updates**: Added version management system for Redis with automated version tracking and update capabilities ([**@kvaps**](https://github.com/kvaps) in #1681).
### Networking
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 with improved stability and new features ([**@kvaps**](https://github.com/kvaps) in #1819).
* **[kubeovn] Package from external repo**: Extracted Kube-OVN packaging from main repository to external repository, improving modularity ([**@lllamnyp**](https://github.com/lllamnyp) in #1535).
* **[cilium] Update Cilium to v1.18.5**: Updated Cilium to version 1.18.5 with latest features and bug fixes ([**@lexfrei**](https://github.com/lexfrei) in #1769).
* **[system/cilium] Enable topology-aware routing for services**: Enabled topology-aware routing for Cilium services, improving traffic distribution and reducing latency by routing traffic to endpoints in the same zone when possible; a Service annotation sketch follows this list ([**@nbykov0**](https://github.com/nbykov0) in #1734).
* **[cilium] Enable automatic pod rollout on configmap updates**: Cilium and Cilium operator pods now automatically restart when the cilium-config ConfigMap is updated, ensuring configuration changes are applied immediately ([**@kvaps**](https://github.com/kvaps) in #1728).
* **[kubernetes] Fix endpoints for cilium-gateway**: Fixed endpoint configuration for cilium-gateway, ensuring proper service discovery and connectivity ([**@kvaps**](https://github.com/kvaps) in #1729).
* **[multus] Increase memory limit**: Increased memory limits for Multus components to handle larger network configurations and reduce out-of-memory issues ([**@nbykov0**](https://github.com/nbykov0) in #1773).
* **[main][paas-full] Add multus dependencies similar to other CNIs**: Added Multus as a dependency in the paas-full package, consistent with how other CNIs are included ([**@nbykov0**](https://github.com/nbykov0) in #1842).
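As background for the topology-aware routing entry above, a Kubernetes Service opts in with the standard `service.kubernetes.io/topology-mode` annotation; the Service below is an illustrative placeholder rather than a Cozystack manifest.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: example-svc                          # placeholder
  annotations:
    # Routes traffic to same-zone endpoints when topology hints allow;
    # older clusters used service.kubernetes.io/topology-aware-hints.
    service.kubernetes.io/topology-mode: Auto
spec:
  selector:
    app: example
  ports:
    - port: 80
      targetPort: 8080
```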
### Virtual Machines
* **[vm] Always expose VMs with a service**: Virtual machines are now always exposed with at least a ClusterIP service, ensuring they have in-cluster DNS names and can be accessed from other pods even without public IP addresses; see the Service sketch after this list ([**@lllamnyp**](https://github.com/lllamnyp) in #1738).
* **[virtual-machine] Improve check for resizing job**: Improved storage resize logic to only expand persistent volume claims when storage is being increased, preventing unintended storage reduction operations ([**@kvaps**](https://github.com/kvaps) in #1688).
* **[virtual-machine,vm-instance] Add nodeAffinity for Windows VMs based on scheduling config**: Added nodeAffinity configuration to virtual-machine and vm-instance charts to support dedicated nodes for Windows VMs ([**@kvaps**](https://github.com/kvaps) in #1693).
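For the always-exposed VMs above, the generated ClusterIP Service is conceptually shaped like the sketch below, giving the VM a stable in-cluster DNS name such as `my-vm.tenant-foo.svc`; the name, namespace, and port are assumptions for illustration.

```yaml
# Hypothetical shape of the per-VM ClusterIP Service (names assumed).
apiVersion: v1
kind: Service
metadata:
  name: my-vm                      # assumed to follow the VM name
  namespace: tenant-foo
spec:
  type: ClusterIP
  selector:
    vm.kubevirt.io/name: my-vm     # standard label on KubeVirt launcher pods
  ports:
    - name: ssh
      port: 22
```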
### Monitoring
* **[monitoring] Add SLACK_SEVERITY_FILTER field and VMAgent for tenant monitoring**: Introduced SLACK_SEVERITY_FILTER environment variable in Alerta deployment to enable filtering of alert severities for Slack notifications. Added VMAgent resource template for scraping metrics within tenant namespaces, improving monitoring granularity ([**@IvanHunters**](https://github.com/IvanHunters) in #1712).
* **[monitoring] Improve tenant metrics collection**: Improved tenant metrics collection mechanisms for better observability and monitoring coverage ([**@IvanHunters**](https://github.com/IvanHunters) in #1684).
### System Configuration
* **[api, lineage] Tolerate all taints**: Updated API and lineage webhook to tolerate all taints, ensuring controllers can run on any node regardless of taint configuration; the catch-all toleration is sketched after this list ([**@nbykov0**](https://github.com/nbykov0) in #1781).
* **[system] Add resource requests and limits to etcd-defrag**: Added resource requests and limits to etcd-defrag job to ensure proper resource allocation and prevent resource contention ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1785).
* **[system:coredns] update coredns app labels to match Talos coredns labels**: Updated coredns app labels to match Talos coredns labels, ensuring consistency across the platform ([**@nbykov0**](https://github.com/nbykov0) in #1675).
* **[system:monitoring-agents] rename coredns metrics service**: Renamed coredns metrics service to avoid interference with coredns service used for name resolution in tenant k8s clusters ([**@nbykov0**](https://github.com/nbykov0) in #1676).
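The tolerate-all-taints change above boils down to the standard catch-all toleration, shown here as a pod-spec fragment:

```yaml
# Catch-all toleration: matches every taint regardless of key or effect.
tolerations:
  - operator: Exists
```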
### Tenants and Namespaces
* **[tenant] Allow egress to parent ingress pods**: Updated tenant network policies to allow egress traffic to parent cluster ingress pods, enabling proper communication patterns; see the NetworkPolicy sketch after this list ([**@lexfrei**](https://github.com/lexfrei) in #1765).
* **[tenant] Run cleanup job from system namespace**: Moved tenant cleanup job to run from system namespace, improving security and resource isolation ([**@lllamnyp**](https://github.com/lllamnyp) in #1774).
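The added tenant egress rule corresponds roughly to the NetworkPolicy fragment below; the namespace name and pod labels are assumptions for illustration.

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-egress-to-parent-ingress         # placeholder name
  namespace: tenant-foo                        # placeholder tenant namespace
spec:
  podSelector: {}                              # all pods in the tenant
  policyTypes: [Egress]
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: tenant-root   # assumed parent ns
          podSelector:
            matchLabels:
              app.kubernetes.io/name: ingress-nginx      # assumed pod label
```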
### FluxCD
* **[fluxcd] Add flux-aio module and migration**: Added FluxCD all-in-one module with migration support, simplifying FluxCD installation and management ([**@kvaps**](https://github.com/kvaps) in #1698).
* **[fluxcd] Enable source-watcher**: Enabled source-watcher in FluxCD configuration for improved GitOps synchronization and faster update detection ([**@kvaps**](https://github.com/kvaps) in #1706).
### Applications
* **[dashboard] Fix CustomFormsOverride schema to nest properties under spec.properties**: Fixed CustomFormsOverride schema generation to properly nest properties under `spec.properties` instead of directly under `properties`, ensuring correct form schema generation ([**@kvaps**](https://github.com/kvaps) in #1692).
* **[apps] Refactor apiserver to use typed objects and fix UnstructuredList GVK**: Refactored apiserver REST handlers to use typed objects instead of unstructured.Unstructured, eliminating runtime conversions. Fixed UnstructuredList GVK issue where objects were using the first registered kind instead of the correct kind ([**@kvaps**](https://github.com/kvaps) in #1679).
* **[keycloak] Make kubernetes client public**: Made Kubernetes client public in Keycloak configuration, enabling broader access patterns for Kubernetes integrations ([**@lllamnyp**](https://github.com/lllamnyp) in #1802).
## Improvements
* **[kubernetes] Granular application extension dependencies**: Made dependency management for Kubernetes application extensions more granular, giving finer control over which dependencies are installed ([**@nbykov0**](https://github.com/nbykov0) in #1683).
* **[core:installer] Address buildx warnings**: Fixed Dockerfile syntax warnings from buildx, ensuring clean builds without warnings ([**@nbykov0**](https://github.com/nbykov0) in #1682).
* **[linstor] Update piraeus-operator v2.10.2**: Updated piraeus-operator to version 2.10.2 with improved stability and bug fixes ([**@kvaps**](https://github.com/kvaps) in #1689).
* **Update SeaweedFS v4.02**: Updated SeaweedFS to version 4.02 with improved S3 daemon performance and fixes ([**@kvaps**](https://github.com/kvaps) in #1725).
* **[installer,dx] Rename cozypkg to cozyhr**: Renamed cozypkg tool to cozyhr for better branding and consistency ([**@kvaps**](https://github.com/kvaps) in #1763).
## Fixes
* **fix(platform): fix migrations for v0.40 release**: Fixed platform migrations for v0.40 release, ensuring smooth upgrades from previous versions ([**@kvaps**](https://github.com/kvaps) in #1846).
* **[platform] fix migration for removing fluxcd-operator**: Fixed migration logic for removing fluxcd-operator, ensuring clean removal without leaving orphaned resources ([**@kvaps**](https://github.com/kvaps) in commit 4a83d2c7).
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring ([**@kvaps**](https://github.com/kvaps) in #1770).
* **[kubevirt-operator] Revert incorrect case change in VM alerts**: Reverted incorrect case change in VM alert names to maintain consistency ([**@lexfrei**](https://github.com/lexfrei) in #1804).
* **[cozystack-controller] Fix: move crds to definitions**: Fixed CRD placement by moving them to definitions directory, ensuring proper resource organization ([**@kvaps**](https://github.com/kvaps) in #1759).
## Dependencies
* **Update SeaweedFS v4.02**: Updated SeaweedFS to version 4.02 ([**@kvaps**](https://github.com/kvaps) in #1725).
* **[seaweedfs] Traffic locality**: Upgraded SeaweedFS to v4.05 with traffic locality capabilities ([**@nbykov0**](https://github.com/nbykov0) in #1748).
* **[linstor] Update piraeus-operator v2.10.2**: Updated piraeus-operator to version 2.10.2 ([**@kvaps**](https://github.com/kvaps) in #1689).
* **[kube-ovn] Update to v1.14.25**: Updated Kube-OVN to version 1.14.25 ([**@kvaps**](https://github.com/kvaps) in #1819).
* **[cilium] Update Cilium to v1.18.5**: Updated Cilium to version 1.18.5 ([**@lexfrei**](https://github.com/lexfrei) in #1769).
* **Update go modules**: Updated Go modules to latest versions ([**@kvaps**](https://github.com/kvaps) in #1736).
## Development, Testing, and CI/CD
* **[ci] Fix auto-release workflow**: Fixed auto-release workflow to ensure correct release publishing and tagging ([**@kvaps**](https://github.com/kvaps) in commit 526af294).
* **fix(ci): ensure correct latest release after backport publishing**: Fixed CI workflow to correctly identify and tag the latest release after backport publishing ([**@kvaps**](https://github.com/kvaps) in #1800).
* **[workflows] Add auto patch release workflow**: Added automated patch release workflow for streamlined release management ([**@kvaps**](https://github.com/kvaps) in #1754).
* **[workflow] Add GitHub Action to update release notes from changelogs**: Added GitHub Action to automatically update release notes from changelog files ([**@kvaps**](https://github.com/kvaps) in #1752).
* **[ci] Improve backport workflow with merge_commits skip and conflict resolution**: Improved backport workflow with better merge commit handling and conflict resolution ([**@kvaps**](https://github.com/kvaps) in #1694).
* **[testing] Add aliases and autocomplete**: Added shell aliases and autocomplete support for testing commands, improving developer experience ([**@lllamnyp**](https://github.com/lllamnyp) in #1803).
* **[kubernetes] Add lb tests for tenant k8s**: Added load balancer tests for tenant Kubernetes clusters, improving test coverage ([**@IvanHunters**](https://github.com/IvanHunters) in #1783).
* **[agents] Add instructions for working with unresolved code review comments**: Added documentation and instructions for working with unresolved code review comments in agent workflows ([**@kvaps**](https://github.com/kvaps) in #1710).
* **feat(ci): add /retest command to rerun tests from Prepare environment**: Added a `/retest` comment command that reruns tests starting from the Prepare environment job ([**@kvaps**](https://github.com/kvaps) in commit 30c1041e).
* **fix(ci): remove GITHUB_TOKEN extraheader to trigger workflows**: Removed GITHUB_TOKEN extraheader to properly trigger workflows ([**@kvaps**](https://github.com/kvaps) in commit 68a639b3).
* **Fix: Add missing components to `distro-full` bundle**: Fixed missing components in distro-full bundle, ensuring all required components are included ([**@LoneExile**](https://github.com/LoneExile) in #1620).
* **Update Flux Operator (v0.33.0)**: Updated Flux Operator to version 0.33.0 ([**@kingdonb**](https://github.com/kingdonb) in #1649).
* **Add changelogs for v0.38.3 and v0.38.4**: Added missing changelogs for the v0.38.3 and v0.38.4 releases ([**@androndo**](https://github.com/androndo) in #1743).
* **Add changelog for v0.39.1**: Added the changelog for the v0.39.1 release ([**@androndo**](https://github.com/androndo) in #1750).
* **Add Cloupard to ADOPTERS.md**: Added Cloupard to the adopters list ([**@SerjioTT**](https://github.com/SerjioTT) in #1733).
## Documentation
* **[website] docs: expand monitoring and alerting documentation**: Expanded monitoring and alerting documentation with comprehensive guides, examples, and troubleshooting information ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#388](https://github.com/cozystack/website/pull/388)).
* **[website] fix auto-generation of documentation**: Fixed automatic documentation generation process, ensuring all documentation is properly generated and formatted ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#391](https://github.com/cozystack/website/pull/391)).
* **[website] secure boot**: Added documentation for Secure Boot support in Talos Linux ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#387](https://github.com/cozystack/website/pull/387)).
## Tools
* **[talm] feat(helpers): add bond interface discovery helpers**: Added bond interface discovery helpers to talm for easier network configuration ([**@kvaps**](https://github.com/kvaps) in [cozystack/talm#94](https://github.com/cozystack/talm/pull/94)).
* **[talm] feat(talosconfig): add certificate regeneration from secrets.yaml**: Added certificate regeneration functionality to talm talosconfig command, allowing certificates to be regenerated from secrets.yaml ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@1319dde).
* **[talm] fix(init): make name optional for -u flag**: Made name parameter optional for init command with -u flag, improving flexibility ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@da29320).
* **[talm] fix(wrapper): copy NoOptDefVal when remapping -f to -F flag**: Fixed wrapper to properly copy NoOptDefVal when remapping flags, ensuring correct default value handling ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@f6a6f1d).
* **[talm] fix(root): detect project root with secrets.encrypted.yaml**: Fixed root detection to properly identify project root when secrets.encrypted.yaml is present ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@cf56780).
* **[talm] Fix interfaces helper for Talos v1.12**: Fixed interfaces helper to work correctly with Talos v1.12 ([**@kvaps**](https://github.com/kvaps) in cozystack/talm@34984ae).
* **[talm] Fix typo on README.md**: Fixed typo in README documentation ([**@diegolakatos**](https://github.com/diegolakatos) in [cozystack/talm#92](https://github.com/cozystack/talm/pull/92)).
* **[talm] fix(template): return error for invalid YAML in template output**: Fixed template command to return proper error for invalid YAML output ([**@kvaps**](https://github.com/kvaps) in [cozystack/talm#93](https://github.com/cozystack/talm/pull/93)).
* **[talm] feat(cozystack): enable allocateNodeCIDRs by default**: Enabled allocateNodeCIDRs by default in talm cozystack preset ([**@lexfrei**](https://github.com/lexfrei) in [cozystack/talm#91](https://github.com/cozystack/talm/pull/91)).
* **[boot-to-talos] feat(network): add VLAN interface support via netlink**: Added VLAN interface support via netlink in boot-to-talos for advanced network configuration ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@02874d7).
* **[boot-to-talos] feat(network): add bond interface support via netlink**: Added bond interface support via netlink in boot-to-talos for network bonding configurations ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@067822d).
* **[boot-to-talos] Draft EFI Support**: Added draft EFI support in boot-to-talos for UEFI boot scenarios ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@e194bc8).
* **[boot-to-talos] Change default install image size from 2GB to 3GB**: Changed default install image size from 2GB to 3GB to accommodate larger installations ([**@kvaps**](https://github.com/kvaps) in cozystack/boot-to-talos@3bfb035).
* **[cozyhr] feat(values): add valuesFrom support for HelmRelease**: Added valuesFrom support for HelmRelease in cozyhr tool, enabling better configuration management ([**@kvaps**](https://github.com/kvaps) in cozystack/cozyhr@7dff0c8).
* **[cozyhr] Rename cozypkg to cozyhr**: Renamed cozypkg tool to cozyhr for better branding ([**@kvaps**](https://github.com/kvaps) in cozystack/cozyhr@1029461).
---
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@IvanHunters**](https://github.com/IvanHunters)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@lllamnyp**](https://github.com/lllamnyp)
* [**@nbykov0**](https://github.com/nbykov0)
* [**@LoneExile**](https://github.com/LoneExile)
* [**@kingdonb**](https://github.com/kingdonb)
* [**@androndo**](https://github.com/androndo)
* [**@SerjioTT**](https://github.com/SerjioTT)
* [**@matthieu-robin**](https://github.com/matthieu-robin)
* [**@diegolakatos**](https://github.com/diegolakatos)
---
**Full Changelog**: [v0.39.0...v0.40.0](https://github.com/cozystack/cozystack/compare/v0.39.0...v0.40.0)
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.0
-->

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.1
-->
## Fixes
* **[linstor] Update piraeus-server patches with critical fixes**: Backported critical patches to piraeus-server that address storage stability issues and improve DRBD resource handling. These patches fix edge cases in device management and ensure more reliable storage operations ([**@kvaps**](https://github.com/kvaps) in #1850, #1852).
---
**Full Changelog**: [v0.40.0...v0.40.1](https://github.com/cozystack/cozystack/compare/v0.40.0...v0.40.1)

@@ -1,15 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.2
-->
## Improvements
* **[linstor] Refactor node-level RWX validation**: Refactored the node-level ReadWriteMany (RWX) validation logic in LINSTOR CSI. The validation has been moved to the CSI driver level with a custom linstor-csi image build, providing more reliable RWX volume handling and clearer error messages when RWX requirements cannot be satisfied ([**@kvaps**](https://github.com/kvaps) in #1856, #1857).
## Fixes
* **[linstor] Remove node-level RWX validation**: Removed the problematic node-level RWX validation that was causing issues with volume provisioning. The validation logic has been refactored and moved to a more appropriate location in the LINSTOR CSI driver ([**@kvaps**](https://github.com/kvaps) in #1851).
---
**Full Changelog**: [v0.40.1...v0.40.2](https://github.com/cozystack/cozystack/compare/v0.40.1...v0.40.2)

@@ -1,15 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.3
-->
## Fixes
* **[apiserver] Fix Watch resourceVersion and bookmark handling**: Fixed issues with Watch API handling of resourceVersion and bookmarks, ensuring proper event streaming and state synchronization for API clients ([**@kvaps**](https://github.com/kvaps) in #1860).
## Dependencies
* **[cilium] Update Cilium to v1.18.6**: Updated Cilium CNI to v1.18.6 with security fixes and performance improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #1868, #1870).
---
**Full Changelog**: [v0.40.2...v0.40.3](https://github.com/cozystack/cozystack/compare/v0.40.2...v0.40.3)

@@ -1,23 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.40.4
-->
## Improvements
* **[kubernetes] Increase default apiServer resourcesPreset to large**: Increased the default resource preset for kube-apiserver to `large` to ensure more reliable operation under higher workloads and prevent resource constraints; a values sketch follows this list ([**@kvaps**](https://github.com/kvaps) in #1875, #1882).
* **[kubernetes] Increase kube-apiserver startup probe threshold**: Increased the startup probe threshold for kube-apiserver to allow more time for the API server to become ready, especially in scenarios with slow storage or high load ([**@kvaps**](https://github.com/kvaps) in #1876, #1883).
* **[etcd] Increase probe thresholds for better recovery**: Increased etcd probe thresholds to provide more time for recovery operations, improving cluster resilience during network issues or temporary slowdowns ([**@kvaps**](https://github.com/kvaps) in #1874, #1878).
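The resource-preset change above maps to a chart values fragment roughly like this; the key names are assumed from the entry text, not verified against the chart schema.

```yaml
# Illustrative values fragment only; key names are assumptions.
controlPlane:
  apiServer:
    resourcesPreset: large   # raised default preset
```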
## Fixes
* **[dashboard] Fix view of loadbalancer IP in services window**: Fixed an issue where load balancer IP addresses were not displayed correctly in the services window of the dashboard ([**@IvanHunters**](https://github.com/IvanHunters) in #1884, #1887).
## Dependencies
* **Update Talos Linux v1.11.6**: Updated Talos Linux to v1.11.6 with latest security patches and improvements ([**@kvaps**](https://github.com/kvaps) in #1879).
---
**Full Changelog**: [v0.40.3...v0.40.4](https://github.com/cozystack/cozystack/compare/v0.40.3...v0.40.4)

@@ -1,63 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.0
-->
# Cozystack v0.41.0 — "MongoDB"
This release introduces MongoDB as a new managed application, expanding Cozystack's database offerings alongside existing PostgreSQL, MySQL, and Redis services. The release also includes storage improvements, Kubernetes stability enhancements, and updated documentation.
## Feature Highlights
### MongoDB Managed Application
Cozystack now includes MongoDB as a fully managed database service. Users can deploy production-ready MongoDB instances directly from the application catalog with minimal configuration.
Key capabilities:
- **Replica Set deployment**: Automatic configuration of MongoDB replica sets for high availability
- **Persistent storage**: Integration with Cozystack storage backends for reliable data persistence
- **Resource management**: Configurable CPU, memory, and storage resources
- **Monitoring integration**: Built-in metrics export for platform monitoring
Deploy MongoDB through the Cozystack dashboard or using the standard application deployment workflow ([**@lexfrei**](https://github.com/lexfrei) in #1822, #1881).
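Deployment can also be expressed declaratively through the Cozystack API; the sketch below assumes the `apps.cozystack.io` API group and the spec fields shown, which are illustrative rather than verified schema.

```yaml
# Illustrative only: kind and group follow Cozystack's app API pattern,
# and the spec fields are assumptions, not verified schema.
apiVersion: apps.cozystack.io/v1alpha1
kind: MongoDB
metadata:
  name: mydb
  namespace: tenant-foo
spec:
  replicas: 3      # assumed field: replica set members
  size: 10Gi       # assumed field: persistent volume size
```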
## Improvements
* **[linstor] Update piraeus-server patches with critical fixes**: Backported critical patches to piraeus-server that address storage stability issues and improve DRBD resource handling. These patches fix edge cases in device management and ensure more reliable storage operations ([**@kvaps**](https://github.com/kvaps) in #1850, #1852).
* **[linstor] Refactor node-level RWX validation**: Refactored the node-level ReadWriteMany (RWX) validation logic in LINSTOR CSI. The validation has been moved to the CSI driver level with a custom linstor-csi image build, providing more reliable RWX volume handling and clearer error messages when RWX requirements cannot be satisfied ([**@kvaps**](https://github.com/kvaps) in #1856, #1857).
* **[kubernetes] Increase default apiServer resourcesPreset to large**: Increased the default resource preset for kube-apiserver to `large` to ensure more reliable operation under higher workloads and prevent resource constraints ([**@kvaps**](https://github.com/kvaps) in #1875, #1882).
* **[kubernetes] Increase kube-apiserver startup probe threshold**: Increased the startup probe threshold for kube-apiserver to allow more time for the API server to become ready, especially in scenarios with slow storage or high load ([**@kvaps**](https://github.com/kvaps) in #1876, #1883).
* **[etcd] Increase probe thresholds for better recovery**: Increased etcd probe thresholds to provide more time for recovery operations, improving cluster resilience during network issues or temporary slowdowns ([**@kvaps**](https://github.com/kvaps) in #1874, #1878).
## Fixes
* **[linstor] Remove node-level RWX validation**: Removed the problematic node-level RWX validation that was causing issues with volume provisioning. The validation logic has been refactored and moved to a more appropriate location in the LINSTOR CSI driver ([**@kvaps**](https://github.com/kvaps) in #1851).
* **[apiserver] Fix Watch resourceVersion and bookmark handling**: Fixed issues with Watch API handling of resourceVersion and bookmarks, ensuring proper event streaming and state synchronization for API clients ([**@kvaps**](https://github.com/kvaps) in #1860).
* **[dashboard] Fix view of loadbalancer IP in services window**: Fixed an issue where load balancer IP addresses were not displayed correctly in the services window of the dashboard ([**@IvanHunters**](https://github.com/IvanHunters) in #1884, #1887).
## Dependencies
* **[cilium] Update cilium to v1.18.6**: Updated Cilium CNI to v1.18.6 with security fixes and performance improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #1868, #1870).
* **Update Talos Linux v1.11.6**: Updated Talos Linux to v1.11.6 with latest security patches and improvements ([**@kvaps**](https://github.com/kvaps) in #1879).
## Documentation
* **[website] Add documentation for creating and managing cloned virtual machines**: Added comprehensive guide for VM cloning operations ([**@sircthulhu**](https://github.com/sircthulhu) in [cozystack/website#401](https://github.com/cozystack/website/pull/401)).
* **[website] Simplify NFS driver setup instructions**: Improved NFS driver setup documentation with clearer instructions ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#399](https://github.com/cozystack/website/pull/399)).
* **[website] Update Talos installation docs for Hetzner and Servers.com**: Updated installation documentation with improved instructions for Hetzner and Servers.com environments ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#395](https://github.com/cozystack/website/pull/395)).
* **[website] Add Hetzner RobotLB documentation**: Added documentation for configuring public IP with Hetzner RobotLB ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#394](https://github.com/cozystack/website/pull/394)).
* **[website] Add Hidora organization support details**: Added Hidora to the support page with organization details ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#397](https://github.com/cozystack/website/pull/397), [cozystack/website#398](https://github.com/cozystack/website/pull/398)).
---
**Full Changelog**: [v0.40.0...v0.41.0](https://github.com/cozystack/cozystack/compare/v0.40.0...v0.41.0)

@@ -1,11 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.1
-->
## Improvements
* **[kubernetes] Add enum validation for IngressNginx exposeMethod**: Added enum validation for the `exposeMethod` field in IngressNginx configuration, preventing invalid values and improving user experience with clear valid options ([**@sircthulhu**](https://github.com/sircthulhu) in #1895, #1897).
---
**Full Changelog**: [v0.41.0...v0.41.1](https://github.com/cozystack/cozystack/compare/v0.41.0...v0.41.1)

@@ -1,13 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.2
-->
## Improvements
* **[monitoring-agents] Set minReplicas to 1 for VPA for VMAgent**: Configured VPA (Vertical Pod Autoscaler) to maintain at least 1 replica for VMAgent, ensuring monitoring availability during scaling operations; see the VPA sketch after this list ([**@sircthulhu**](https://github.com/sircthulhu) in #1894, #1905).
* **[mongodb] Remove user-configurable images from MongoDB chart**: Removed user-configurable image options from the MongoDB chart to simplify configuration and ensure consistency with tested image versions ([**@kvaps**](https://github.com/kvaps) in #1901, #1904).
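The VPA setting above maps to the updater's `minReplicas` knob, sketched here with a placeholder target:

```yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: vmagent
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: vmagent        # placeholder target workload
  updatePolicy:
    updateMode: Auto
    minReplicas: 1       # never evict below one running replica
```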
---
**Full Changelog**: [v0.41.1...v0.41.2](https://github.com/cozystack/cozystack/compare/v0.41.1...v0.41.2)

@@ -1,15 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v0.41.3
-->
## Improvements
* **[kubernetes] Show Service and Ingress resources for Kubernetes app in dashboard**: Added visibility of Service and Ingress resources for Kubernetes applications in the dashboard, improving resource management and monitoring capabilities ([**@sircthulhu**](https://github.com/sircthulhu) in #1912, #1915).
## Fixes
* **[dashboard] Fix filtering on Pods tab for Service**: Fixed an issue where pod filtering was not working correctly on the Pods tab when viewing Services in the dashboard ([**@sircthulhu**](https://github.com/sircthulhu) in #1909, #1914).
---
**Full Changelog**: [v0.41.2...v0.41.3](https://github.com/cozystack/cozystack/compare/v0.41.2...v0.41.3)

@@ -1,134 +0,0 @@
# Cozystack v1.0.0-alpha.1 — "Package-Based Architecture"
This alpha release introduces a fundamental architectural shift from HelmRelease bundles to Package-based deployment managed by the new cozystack-operator. It includes a comprehensive backup system with Velero integration, significant API changes that rename the core CRD, Flux sharding for improved tenant workload distribution, enhanced monitoring capabilities, and various improvements to virtual machines, tenants, and the build workflow.
> **⚠️ Alpha Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Breaking Changes
### API Rename: CozystackResourceDefinition → ApplicationDefinition
The `CozystackResourceDefinition` CRD has been renamed to `ApplicationDefinition` for better clarity and consistency. This change affects:
- All Go types and controller files
- CRD Helm chart renamed from `cozystack-resource-definition-crd` to `application-definition-crd`
- All cozyrds YAML manifests updated to use `kind: ApplicationDefinition`
A migration (v24) is included to handle the transition automatically.
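In manifests, the rename is a one-line change of `kind` (the apiVersion shown is for illustration):

```yaml
# Before the rename:
apiVersion: cozystack.io/v1alpha1   # illustrative group/version
kind: CozystackResourceDefinition
---
# After migration v24, the same object is served as:
apiVersion: cozystack.io/v1alpha1
kind: ApplicationDefinition
```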
### Package-Based Deployment
The platform now uses Package resources managed by cozystack-operator instead of HelmRelease bundles. Key changes:
- Restructured values.yaml with full configuration support (networking, publishing, authentication, scheduling, branding, resources)
- Added values-isp-full.yaml and values-isp-hosted.yaml for bundle variants
- Package resources replace old HelmRelease templates
- PackageSources moved from sources/ to templates/sources/
- Migration script `hack/migrate-to-version-1.0.sh` provided for converting ConfigMaps to Package resources
---
## Major Features and Improvements
### Cozystack Operator
A new operator has been introduced to manage Package and PackageSource resources, providing declarative package management for the platform:
* **[cozystack-operator] Introduce API objects: packages and packagesources**: Added new CRDs for declarative package management, defining the API for Package and PackageSource resources ([**@kvaps**](https://github.com/kvaps) in #1740).
* **[cozystack-operator] Introduce Cozystack-operator core logic**: Implemented core reconciliation logic for the operator, handling Package and PackageSource lifecycle management ([**@kvaps**](https://github.com/kvaps) in #1741).
* **[cozystack-operator] Add Package and PackageSource reconcilers**: Added controllers for Package and PackageSource resources with full reconciliation support ([**@kvaps**](https://github.com/kvaps) in #1755).
* **[cozystack-operator] Add deployment files**: Added Kubernetes deployment manifests for running cozystack-operator in the cluster ([**@kvaps**](https://github.com/kvaps) in #1761).
* **[platform] Add PackageSources for cozystack-operator**: Added PackageSource definitions for cozystack-operator integration ([**@kvaps**](https://github.com/kvaps) in #1760).
* **[cozypkg] Add tool for managing Package and PackageSources**: Added CLI tool for managing Package and PackageSource resources ([**@kvaps**](https://github.com/kvaps) in #1756).
### Backup System
Comprehensive backup functionality has been added with Velero integration for managing application backups:
* **[backups] Implement core backup Plan controller**: Core controller for managing backup schedules and plans, providing the foundation for backup orchestration ([**@lllamnyp**](https://github.com/lllamnyp) in #1640).
* **[backups] Build and deploy backup controller**: Deployment infrastructure for the backup controller, including container image builds and Kubernetes manifests ([**@lllamnyp**](https://github.com/lllamnyp) in #1685).
* **[backups] Scaffold a backup strategy API group**: Added API group for backup strategies, enabling pluggable backup implementations ([**@lllamnyp**](https://github.com/lllamnyp) in #1687).
* **[backups] Add indices to core backup resources**: Added indices to backup resources for improved query performance ([**@lllamnyp**](https://github.com/lllamnyp) in #1719).
* **[backups] Stub the Job backup strategy controller**: Added stub implementation for Job-based backup strategy ([**@lllamnyp**](https://github.com/lllamnyp) in #1720).
* **[backups] Implement Velero strategy controller**: Integration with Velero for backup operations, enabling enterprise-grade backup capabilities ([**@androndo**](https://github.com/androndo) in #1762).
* **[backups,dashboard] User-facing UI**: Dashboard interface for managing backups and backup jobs, providing visibility into backup status and history ([**@lllamnyp**](https://github.com/lllamnyp) in #1737).
### Platform Architecture
* **[platform] Migrate from HelmRelease bundles to Package-based deployment**: Replaced HelmRelease bundle system with Package resources managed by cozystack-operator. Includes restructured values.yaml with full configuration support and migration tooling ([**@kvaps**](https://github.com/kvaps) in #1816).
* **refactor(api): rename CozystackResourceDefinition to ApplicationDefinition**: Renamed CRD and all related types for better clarity and consistency. Updated all Go types, controllers, and 25+ YAML manifests ([**@kvaps**](https://github.com/kvaps) in #1864).
* **feat(flux): implement flux sharding for tenant HelmReleases**: Added Flux sharding support to distribute tenant HelmRelease reconciliation across multiple controllers, improving scalability in multi-tenant environments; a sharding label sketch follows this list ([**@kvaps**](https://github.com/kvaps) in #1816).
* **refactor(installer): migrate installer to cozystack-operator**: Moved installer functionality to cozystack-operator for unified management ([**@kvaps**](https://github.com/kvaps) in #1816).
* **feat(api): add chartRef to ApplicationDefinition**: Added chartRef field to support ExternalArtifact references for flexible chart sourcing ([**@kvaps**](https://github.com/kvaps) in #1816).
* **feat(api): show only hash in version column for applications and modules**: Simplified version display in API responses for cleaner output ([**@kvaps**](https://github.com/kvaps) in #1816).
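Flux sharding is an upstream Flux feature that assigns objects to controller replicas via a label selector; a tenant HelmRelease would carry a label like the fragment below, where the shard name is a placeholder.

```yaml
# Label fragment on a tenant HelmRelease, matched by a helm-controller
# replica started with --watch-label-selector=sharding.fluxcd.io/key=shard-1
metadata:
  labels:
    sharding.fluxcd.io/key: shard-1   # shard name is a placeholder
```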
### Virtual Machines
* **[vm] Always expose VMs with a service**: Virtual machines are now always exposed with at least a ClusterIP service, ensuring they have in-cluster DNS names and can be accessed from other pods even without public IP addresses ([**@lllamnyp**](https://github.com/lllamnyp) in #1738, #1751).
### Monitoring
* **[monitoring] Add SLACK_SEVERITY_FILTER field and VMAgent for tenant monitoring**: Introduced the SLACK_SEVERITY_FILTER environment variable in the Alerta deployment to enable filtering of alert severities for Slack notifications based on the disabledSeverity configuration. Additionally, added a VMAgent resource template for scraping metrics within tenant namespaces, improving monitoring granularity and control ([**@IvanHunters**](https://github.com/IvanHunters) in #1712).
### Tenants
* **[tenant] Allow egress to parent ingress pods**: Updated tenant network policies to allow egress traffic to parent cluster ingress pods, enabling proper communication patterns between tenant namespaces and parent cluster ingress controllers ([**@lexfrei**](https://github.com/lexfrei) in #1765, #1776).
* **[tenant] Run cleanup job from system namespace**: Moved tenant cleanup job to run from system namespace, improving security and resource isolation for tenant cleanup operations ([**@lllamnyp**](https://github.com/lllamnyp) in #1774, #1777).
### System
* **[system] Add resource requests and limits to etcd-defrag**: Added resource requests and limits to etcd-defrag job to ensure proper resource allocation and prevent resource contention during etcd maintenance operations ([**@matthieu-robin**](https://github.com/matthieu-robin) in #1785, #1786).
### Development and Build
* **feat(cozypkg): add cross-platform build targets with version injection**: Added cross-platform build targets (linux/amd64, linux/arm64, darwin/amd64, darwin/arm64) for cozypkg/cozyhr tool with automatic version injection from git tags ([**@kvaps**](https://github.com/kvaps) in #1862).
* **refactor: move scripts to hack directory**: Reorganized scripts to standard hack/ location following Kubernetes project conventions ([**@kvaps**](https://github.com/kvaps) in #1863).
## Fixes
* **fix(talos): skip rebuilding assets if files already exist**: Improved Talos package build process to avoid redundant asset rebuilds when files are already present, reducing build time ([**@kvaps**](https://github.com/kvaps)).
* **[kubevirt-operator] Fix typo in VMNotRunningFor10Minutes alert**: Fixed typo in VM alert name, ensuring proper alert triggering and monitoring for virtual machines that are not running for extended periods ([**@lexfrei**](https://github.com/lexfrei) in #1770, #1775).
* **[backups] Fix malformed glob and split in template**: Fixed malformed glob pattern and split operation in backup template processing ([**@lllamnyp**](https://github.com/lllamnyp) in #1708).
## Documentation
* **[website] docs(storage): simplify NFS driver setup instructions**: Simplified NFS driver setup documentation with clearer instructions ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#399](https://github.com/cozystack/website/pull/399)).
* **[website] Add Hidora organization support details**: Added Hidora to the support page with organization details ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#397](https://github.com/cozystack/website/pull/397)).
* **[website] Update LinkedIn link for Hidora organization**: Updated LinkedIn link for Hidora organization on the support page ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#398](https://github.com/cozystack/website/pull/398)).
---
## Migration Guide
### From v0.38.x / v0.39.x to v1.0.0-alpha.1
1. **Backup your cluster** before upgrading
2. Run the migration script: `hack/migrate-to-version-1.0.sh`
3. The migration will:
- Convert ConfigMaps to Package resources
- Rename CozystackResourceDefinition to ApplicationDefinition
- Update HelmRelease references to use Package-based deployment
### Known Issues
- This is an alpha release; some features may be incomplete or change before stable release
- Migration script should be tested in a non-production environment first
---
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@androndo**](https://github.com/androndo)
* [**@IvanHunters**](https://github.com/IvanHunters)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@lllamnyp**](https://github.com/lllamnyp)
* [**@matthieu-robin**](https://github.com/matthieu-robin)
---
**Full Changelog**: [v0.38.0...v1.0.0-alpha.1](https://github.com/cozystack/cozystack/compare/v0.38.0...v1.0.0-alpha.1)
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-alpha.1
-->

@@ -1,71 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-alpha.2
-->
> **⚠️ Alpha Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Major Features and Improvements
### New Applications
* **[apps] Add MongoDB managed application**: Added MongoDB as a new managed application, providing a fully managed MongoDB database with automatic scaling, backups, and high availability support ([**@lexfrei**](https://github.com/lexfrei) in #1822).
### Networking
* **[kilo] Introduce kilo**: Added Kilo WireGuard mesh networking support. Kilo provides secure WireGuard-based VPN mesh for connecting Kubernetes nodes across different networks and regions ([**@kvaps**](https://github.com/kvaps) in #1691).
* **[local-ccm] Add local-ccm package**: Added local cloud controller manager package for managing load balancer services in local/bare-metal environments without a cloud provider ([**@kvaps**](https://github.com/kvaps) in #1831).
### Platform
* **[platform] Add flux-plunger controller**: Added flux-plunger controller to automatically fix stuck HelmRelease errors by cleaning up failed resources and retrying reconciliation ([**@kvaps**](https://github.com/kvaps) in #1843).
* **[platform] Split telemetry between operator and controller**: Separated telemetry collection between cozystack-operator and cozystack-controller for better metrics isolation and monitoring capabilities ([**@kvaps**](https://github.com/kvaps) in #1869).
* **[platform] Remove cozystack.io/ui label**: Cleaned up deprecated `cozystack.io/ui` labels from platform components ([**@kvaps**](https://github.com/kvaps) in #1872).
## Improvements
* **[kubernetes] Increase default apiServer resourcesPreset to large**: Increased the default resource preset for kube-apiserver to `large` to ensure more reliable operation under higher workloads ([**@kvaps**](https://github.com/kvaps) in #1875).
* **[kubernetes] Increase kube-apiserver startup probe threshold**: Increased the startup probe threshold for kube-apiserver to allow more time for the API server to become ready ([**@kvaps**](https://github.com/kvaps) in #1876).
* **[etcd] Increase probe thresholds for better recovery**: Increased etcd probe thresholds to provide more time for recovery operations, improving cluster resilience ([**@kvaps**](https://github.com/kvaps) in #1874).
## Fixes
* **[apiserver] Fix Watch resourceVersion and bookmark handling**: Fixed issues with Watch API handling of resourceVersion and bookmarks, ensuring proper event streaming and state synchronization ([**@kvaps**](https://github.com/kvaps) in #1860).
* **[dashboard] Fix view of loadbalancer IP in services window**: Fixed an issue where load balancer IP addresses were not displayed correctly in the services window of the dashboard ([**@IvanHunters**](https://github.com/IvanHunters) in #1884).
## Dependencies
* **[cilium] Update cilium to v1.18.6**: Updated Cilium CNI to v1.18.6 with security fixes and performance improvements ([**@sircthulhu**](https://github.com/sircthulhu) in #1868).
* **Update Talos Linux v1.12.1**: Updated Talos Linux to v1.12.1 with latest features, security patches and improvements ([**@kvaps**](https://github.com/kvaps) in #1877).
## Documentation
* **[website] Add documentation for creating and managing cloned virtual machines**: Added comprehensive guide for VM cloning operations ([**@sircthulhu**](https://github.com/sircthulhu) in [cozystack/website#401](https://github.com/cozystack/website/pull/401)).
* **[website] Simplify NFS driver setup instructions**: Improved NFS driver setup documentation with clearer instructions ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#399](https://github.com/cozystack/website/pull/399)).
* **[website] Update Talos installation docs for Hetzner and Servers.com**: Updated installation documentation with improved instructions for Hetzner and Servers.com environments ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#395](https://github.com/cozystack/website/pull/395)).
* **[website] Add Hetzner RobotLB documentation**: Added documentation for configuring public IP with Hetzner RobotLB ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#394](https://github.com/cozystack/website/pull/394)).
* **[website] Add Hidora organization support details**: Added Hidora to the support page with organization details ([**@matthieu-robin**](https://github.com/matthieu-robin) in [cozystack/website#397](https://github.com/cozystack/website/pull/397), [cozystack/website#398](https://github.com/cozystack/website/pull/398)).
---
## Contributors
* [**@IvanHunters**](https://github.com/IvanHunters)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@matthieu-robin**](https://github.com/matthieu-robin)
* [**@sircthulhu**](https://github.com/sircthulhu)
---
**Full Changelog**: [v1.0.0-alpha.1...v1.0.0-alpha.2](https://github.com/cozystack/cozystack/compare/v1.0.0-alpha.1...v1.0.0-alpha.2)

@@ -1,144 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.3
-->
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Major Features and Improvements
### New Applications
* **[qdrant] Add Qdrant vector database application**: Added Qdrant as a new managed application, providing a high-performance vector database for AI and machine learning workloads. Supports single replica or clustered mode, persistent storage, resource presets, API key authentication, and optional external LoadBalancer access ([**@lexfrei**](https://github.com/lexfrei) in #1987).
### System Components
* **[system] Add cluster-autoscaler package for Hetzner and Azure**: Added cluster-autoscaler system package with support for multiple cloud providers (Hetzner and Azure) to automatically scale management cluster nodes. Includes comprehensive documentation for Hetzner setup with Talos Linux, vSwitch configuration, and Kilo mesh networking integration ([**@kvaps**](https://github.com/kvaps) in #1964).
* **[system] Add clustersecret-operator package**: Added clustersecret-operator system package for managing secrets across multiple namespaces in Kubernetes clusters ([**@sircthulhu**](https://github.com/sircthulhu) in #2025).
### Networking
* **[kilo] Update to v0.7.0 and add configurable MTU**: Updated Kilo WireGuard mesh networking to v0.7.0 from cozystack fork with pre-built images. Added configurable MTU parameter (default: auto) for WireGuard interface, allowing automatic MTU detection or manual override ([**@kvaps**](https://github.com/kvaps) in #2003).
* **[local-ccm] Add node-lifecycle-controller component**: Added optional node-lifecycle-controller to local-ccm package that automatically deletes unreachable NotReady nodes from the cluster. Solves the "zombie" node problem when cluster autoscaler deletes cloud instances but node objects remain in Kubernetes. Supports configurable node selectors, protected labels, and HA deployment with leader election ([**@IvanHunters**](https://github.com/IvanHunters) in #1992).
### Virtual Machines
* **[vm] Add cpuModel field to specify CPU model without instanceType**: Added cpuModel field to VirtualMachine API, allowing users to specify CPU model directly without using instanceType, providing more granular control over VM CPU configuration ([**@sircthulhu**](https://github.com/sircthulhu) in #2007).
* **[vm] Allow switching between instancetype and custom resources**: Implemented atomic upgrade hook that allows switching between instanceType-based and custom resource-based VM configuration, providing more flexibility in VM resource management ([**@sircthulhu**](https://github.com/sircthulhu) in #2008).
* **[vm] Migrate to runStrategy instead of running**: Migrated VirtualMachine API from deprecated `running` field to `runStrategy` field, following KubeVirt upstream best practices ([**@sircthulhu**](https://github.com/sircthulhu) in #2004).
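Taken together, the entries above shape the underlying KubeVirt object roughly as follows; the CPU model value is illustrative.

```yaml
# Sketch of the resulting KubeVirt VirtualMachine: runStrategy replaces
# the deprecated `running` boolean, and the CPU model is set directly.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: example-vm
spec:
  runStrategy: Always            # was: running: true
  template:
    spec:
      domain:
        cpu:
          model: host-passthrough   # illustrative cpuModel value
        devices: {}
```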
### Backups
* **[backups] Add comprehensive backup and restore functionality**: Major update to backup system including BackupClass for Velero, virtual machine backup strategies, RestoreJob resource with end-to-end restore workflows, Velero integration with polling and status tracking, and enhanced backup plans UI with simplified Plan/BackupJob API ([**@androndo**](https://github.com/androndo) in #1967, [**@lllamnyp**](https://github.com/lllamnyp) in #1968).
* **[backups] Add kubevirt plugin to velero**: Added KubeVirt plugin to Velero for proper virtual machine backup support, enabling consistent snapshots of VM state and data ([**@lllamnyp**](https://github.com/lllamnyp) in #2017).
* **[backups] Install backupstrategy controller by default**: Enabled backupstrategy controller by default to provide automatic backup scheduling and management for managed applications ([**@lllamnyp**](https://github.com/lllamnyp) in #2020).
* **[backups] Better selectors for VM strategy**: Improved VM backup strategy selectors for more accurate and reliable backup targeting ([**@lllamnyp**](https://github.com/lllamnyp) in #2023).
### Platform
* **[kubernetes] Auto-enable Gateway API support in cert-manager**: Added automatic Gateway API support in cert-manager for tenant Kubernetes clusters, enabling automatic certificate management for Gateway API resources; a Gateway sketch follows this list ([**@kvaps**](https://github.com/kvaps) in #1997).
* **[tenant,rbac] Use shared clusterroles**: Refactored tenant RBAC to use shared ClusterRoles, improving maintainability and consistency across tenant namespaces ([**@lllamnyp**](https://github.com/lllamnyp) in #1999).
* **[mongodb] Unify users and databases configuration**: Simplified MongoDB user and database configuration with a more unified API structure ([**@kvaps**](https://github.com/kvaps) in #1923).
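With Gateway API support enabled, cert-manager can issue certificates for Gateway listeners via its standard annotation, roughly as sketched below; the issuer name, gateway class, and hostname are placeholders.

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: example-gateway
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt   # placeholder issuer
spec:
  gatewayClassName: cilium                        # assumed gateway class
  listeners:
    - name: https
      protocol: HTTPS
      port: 443
      hostname: app.example.com                   # placeholder hostname
      tls:
        mode: Terminate
        certificateRefs:
          - name: app-example-com-tls             # Secret cert-manager fills
```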
## Improvements
* **[keycloak-configure,dashboard] Enable insecure TLS verification by default**: Made SSL certificate verification configurable with insecure mode enabled by default for easier local development and testing ([**@IvanHunters**](https://github.com/IvanHunters) in #2005).
* **[dashboard] Add startupProbe to prevent container restarts on slow hardware**: Added startup probe to dashboard pods to prevent unnecessary container restarts on slow hardware or during high load ([**@kvaps**](https://github.com/kvaps) in #1996).
* **[cilium] Change cilium-operator replicas to 1**: Reduced Cilium operator replicas from 2 to 1 to decrease resource consumption in smaller deployments ([**@IvanHunters**](https://github.com/IvanHunters) in #1784).
* **[monitoring] Enable monitoring for core components**: Enhanced monitoring capabilities with better dashboards and metrics collection for core Cozystack components ([**@IvanHunters**](https://github.com/IvanHunters) in #1937).
* **[branding] Separate values for Keycloak**: Separated Keycloak branding values for better customization capabilities ([**@nbykov0**](https://github.com/nbykov0) in #1947).
* **[kubernetes] Use ingress-nginx nodeport service**: Changed Kubernetes managed clusters to use ingress-nginx NodePort service for improved compatibility and flexibility ([**@sircthulhu**](https://github.com/sircthulhu) in #1948).
## Fixes
* **[linstor] Extract piraeus-operator CRDs into separate package**: Fixed issue where Helm did not reliably install all CRDs from large crds.yaml files by creating dedicated piraeus-operator-crds package. This ensures the linstorsatellites.io CRD is properly installed, preventing satellite pod creation failures ([**@IvanHunters**](https://github.com/IvanHunters) in #1991).
* **[platform] Fix cozystack-values secret race condition**: Fixed race condition in cozystack-values secret creation that could cause platform initialization failures ([**@lllamnyp**](https://github.com/lllamnyp) in #2024).
* **[seaweedfs] Increase certificate duration to 10 years**: Increased SeaweedFS certificate validity from 1 year to 10 years to reduce certificate rotation overhead and prevent unexpected certificate expiration issues ([**@IvanHunters**](https://github.com/IvanHunters) in #1986).
* **[monitoring] Remove cozystack-controller dependency**: Fixed monitoring package to remove unnecessary dependency on cozystack-controller, allowing monitoring to be installed independently ([**@IvanHunters**](https://github.com/IvanHunters) in #1990).
* **[monitoring] Remove duplicate dashboards.list from extra/monitoring**: Fixed duplicate dashboards.list configuration in extra/monitoring package ([**@IvanHunters**](https://github.com/IvanHunters) in #2016).
* **[mongodb] Fix pre-commit check**: Fixed pre-commit linting issues in MongoDB package ([**@kvaps**](https://github.com/kvaps) in #1753).
* **[mongodb] Update MongoDB logo**: Updated MongoDB application logo in the dashboard to use the correct branding ([**@kvaps**](https://github.com/kvaps) in #2027).
* **[bootbox] Auto-create bootbox-application as dependency**: Fixed bootbox package to automatically create required bootbox-application dependency ([**@kvaps**](https://github.com/kvaps) in #1974).
* **[migrations] Add migration 25 for v1.0 upgrade cleanup**: Added migration script to handle cleanup during v1.0 upgrade path ([**@kvaps**](https://github.com/kvaps) in #1975).
* **[build] Fix platform migrations image build**: Fixed Docker image build process for platform migrations ([**@kvaps**](https://github.com/kvaps) in #1976).
* **[postgres-operator] Correct PromQL syntax in CNPGClusterOffline alert**: Fixed incorrect PromQL syntax in CNPGClusterOffline Prometheus alert for PostgreSQL clusters ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1981).
* **[coredns] Fix serviceaccount to match kubernetes bootstrap RBAC**: Fixed CoreDNS service account configuration to correctly match Kubernetes bootstrap RBAC requirements ([**@mattia-eleuteri**](https://github.com/mattia-eleuteri) in #1958).
* **[dashboard] Verify JWT token**: Added JWT token verification to dashboard for improved security ([**@lllamnyp**](https://github.com/lllamnyp) in #1980).
* **[talm] Skip config loading for completion subcommands**: Fixed talm CLI to skip unnecessary config loading for shell completion commands ([**@kitsunoff**](https://github.com/kitsunoff) in [cozystack/talm#109](https://github.com/cozystack/talm/pull/109)).
## Dependencies
* **[kube-ovn] Update Kube-OVN to v1.15.3**: Updated Kube-OVN CNI to v1.15.3 with performance improvements and bug fixes ([**@kvaps**](https://github.com/kvaps) in #2022).
* **[local-ccm] Update to v0.3.0**: Updated local cloud controller manager to v0.3.0 with node-lifecycle-controller support ([**@kvaps**](https://github.com/kvaps) in #1992).
* **[kilo] Update to v0.7.0**: Updated Kilo to v0.7.0 from cozystack fork with improved MTU handling ([**@kvaps**](https://github.com/kvaps) in #2003).
## Development, Testing, and CI/CD
* **[ci] Use GitHub Copilot CLI for changelog generation**: Automated changelog generation using GitHub Copilot CLI to improve release process efficiency ([**@androndo**](https://github.com/androndo) in #1753).
* **[ci] Choose runner conditional on label**: Added conditional runner selection in CI based on PR labels for more flexible CI/CD workflows ([**@lllamnyp**](https://github.com/lllamnyp) in #1998).
* **[backups] Add restore jobs controller**: Added controller for managing backup restore jobs ([**@androndo**](https://github.com/androndo) in #1811).
* **Update CODEOWNERS**: Updated CODEOWNERS file to include new maintainers ([**@lllamnyp**](https://github.com/lllamnyp) in #1972, [**@IvanHunters**](https://github.com/IvanHunters) in #2015).
## Documentation
* **[website] Add LINSTOR disk preparation guide**: Added comprehensive documentation for preparing disks for LINSTOR storage system ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#411](https://github.com/cozystack/website/pull/411)).
* **[website] Add Proxmox VM migration guide**: Added detailed guide for migrating virtual machines from Proxmox to Cozystack ([**@IvanHunters**](https://github.com/IvanHunters) in [cozystack/website#410](https://github.com/cozystack/website/pull/410)).
* **[website] Describe operator-based and HelmRelease-based package patterns**: Added development documentation explaining operator-based and HelmRelease-based package patterns for Cozystack ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#413](https://github.com/cozystack/website/pull/413)).
* **[website] Correct typo in kubeconfig reference in Kubernetes installation guide**: Fixed documentation typo in kubeconfig reference ([**@shkarface**](https://github.com/shkarface) in [cozystack/website#414](https://github.com/cozystack/website/pull/414)).
* **[website] Check quotas before an upgrade**: Added troubleshooting documentation for checking resource quotas before performing upgrades ([**@nbykov0**](https://github.com/nbykov0) in [cozystack/website#405](https://github.com/cozystack/website/pull/405)).
---
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@IvanHunters**](https://github.com/IvanHunters)
* [**@androndo**](https://github.com/androndo)
* [**@kitsunoff**](https://github.com/kitsunoff)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@lllamnyp**](https://github.com/lllamnyp)
* [**@mattia-eleuteri**](https://github.com/mattia-eleuteri)
* [**@nbykov0**](https://github.com/nbykov0)
* [**@shkarface**](https://github.com/shkarface)
* [**@sircthulhu**](https://github.com/sircthulhu)
---
**Full Changelog**: [v1.0.0-beta.2...v1.0.0-beta.3](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.2...v1.0.0-beta.3)

View File

@@ -1,96 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.4
-->
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Major Features and Improvements
### Virtual Machines
* **[vm-instance] Complete migration from virtual-machine to vm-disk and vm-instance**: Completed the architectural redesign of virtual machine management by fully migrating from the legacy `virtual-machine` application to the new `vm-disk` and `vm-instance` applications. This includes automatic migration scripts (migration 28) that convert existing virtual machines, handle CDI webhook configurations, and update cloud-init references. The new architecture provides better separation of concerns between disk management and VM lifecycle, enabling more flexible VM configuration and improved resource management ([**@kvaps**](https://github.com/kvaps) in #2040).
* **[vm-instance] Port advanced VM features**: Ported critical VM features from the legacy virtual-machine application including cpuModel field for direct CPU model specification, support for switching between instanceType and custom resource configurations, and migration from deprecated `running` field to `runStrategy` field following KubeVirt best practices ([**@kvaps**](https://github.com/kvaps) in #2040).
### Storage and CSI
* **[kubevirt-csi-driver] Add RWX Filesystem (NFS) support**: Added Read-Write-Many (RWX) filesystem support to kubevirt-csi-driver, enabling multiple pods to mount the same persistent volume simultaneously via NFS. This provides native NFS support for shared storage use cases without requiring external NFS provisioners, with automatic NFS server deployment per PVC and seamless integration with KubeVirt's storage layer ([**@kvaps**](https://github.com/kvaps) in #2042).
### Platform and Infrastructure
* **[cozystack-api] Switch from DaemonSet to Deployment**: Migrated cozystack-api from DaemonSet to Deployment with PreferClose topology spread constraints, improving resource efficiency while maintaining high availability. The Deployment approach reduces resource consumption compared to running API pods on every node, while topology spreading ensures resilient pod placement across the cluster ([**@kvaps**](https://github.com/kvaps) in #2041, #2048).
* **[linstor] Move CRDs installation to dedicated chart**: Refactored LINSTOR CRDs installation by moving them to a dedicated `piraeus-operator-crds` chart, solving Helm's limitation with large CRD files that could cause unreliable installations. This ensures all LINSTOR CRDs (including linstorsatellites.io) are properly installed before the operator starts, preventing satellite pod creation failures. Includes automatic migration script to reassign existing CRDs to the new chart ([**@kvaps**](https://github.com/kvaps) in #2036).
* **[installer] Unify operator templates**: Merged separate operator templates into a single variant-based template, simplifying the installation process and reducing configuration duplication. The new template supports different deployment variants (Talos, non-Talos) through a unified configuration approach ([**@kvaps**](https://github.com/kvaps) in #2034).
### Applications
* **[mariadb] Rename mysql application to mariadb**: Renamed the MySQL application to MariaDB to accurately reflect the underlying database engine being used. Includes automatic migration script (migration 27) that handles resource renaming and ensures seamless upgrade path for existing MySQL deployments. All resources, including databases, users, backups, and configurations, are automatically migrated to use the mariadb naming ([**@kvaps**](https://github.com/kvaps) in #2026).
* **[ferretdb] Remove FerretDB application**: Removed the FerretDB application from the catalog as it has been superseded by native MongoDB support with improved performance and features ([**@kvaps**](https://github.com/kvaps) in #2028).
## Improvements
* **[rbac] Use hierarchical naming scheme**: Refactored RBAC configuration to use hierarchical naming scheme for cluster roles and role bindings, improving organization and maintainability of permission structures across the platform ([**@lllamnyp**](https://github.com/lllamnyp) in #2019).
* **[backups] Create RBAC for backup resources**: Added comprehensive RBAC configuration for backup resources, enabling proper permission management for backup operations and restore jobs across different user roles ([**@lllamnyp**](https://github.com/lllamnyp) in #2018).
* **[etcd-operator] Add vertical-pod-autoscaler dependency**: Added vertical-pod-autoscaler as a dependency to etcd-operator package, ensuring proper resource scaling and optimization for etcd clusters ([**@sircthulhu**](https://github.com/sircthulhu) in #2047).
## Fixes
* **[cozystack-operator] Preserve existing suspend field in package reconciler**: Fixed package reconciler to properly preserve the existing suspend field state during reconciliation, preventing unintended resumption of suspended packages ([**@sircthulhu**](https://github.com/sircthulhu) in #2043).
* **[cozystack-operator] Fix namespace privileged flag resolution**: Fixed operator to correctly resolve namespace privileged flag by checking all Packages in the namespace, not just the first one. This ensures namespaces are properly marked as privileged when any package requires elevated permissions ([**@kvaps**](https://github.com/kvaps) in #2046).
* **[cozystack-operator] Fix namespace reconciliation field ownership**: Fixed Server-Side Apply (SSA) field ownership conflicts by using per-Package field owner for namespace reconciliation, preventing conflicts when multiple packages reconcile the same namespace ([**@kvaps**](https://github.com/kvaps) in #2046).
* **[platform] Clean up Helm secrets for removed releases**: Added cleanup logic to migration 23 to remove orphaned Helm secrets from removed -rd releases, preventing secret accumulation and reducing cluster resource usage ([**@kvaps**](https://github.com/kvaps) in #2035).
* **[monitoring] Fix YAML parse error in vmagent template**: Fixed YAML parsing error in monitoring-agents vmagent template that could cause monitoring stack deployment failures ([**@kvaps**](https://github.com/kvaps) in #2037).
* **[talm] Fix metadata.id type casting in physical_links_info**: Fixed Prometheus query in physical_links_info chart to properly cast metadata.id to string for regexMatch operations, preventing query failures with numeric interface IDs ([**@kvaps**](https://github.com/kvaps) in [cozystack/talm#110](https://github.com/cozystack/talm/pull/110)).
## Dependencies
* **[kilo] Update to v0.7.1**: Updated Kilo WireGuard mesh networking to v0.7.1 with bug fixes and improvements ([**@kvaps**](https://github.com/kvaps) in #2049).
## Development, Testing, and CI/CD
* **[ci] Improve cozyreport functionality**: Enhanced cozyreport tool with improved reporting capabilities for CI/CD pipelines, providing better visibility into test results and build status ([**@lllamnyp**](https://github.com/lllamnyp) in #2032).
* **[e2e] Increase HelmRelease readiness timeout for kubernetes test**: Increased HelmRelease readiness timeout in Kubernetes end-to-end tests to prevent false failures on slower hardware or during high load conditions, specifically targeting ingress-nginx component which may take longer to become ready ([**@lexfrei**](https://github.com/lexfrei) in #2033).
## Documentation
* **[website] Add documentation versioning**: Implemented comprehensive documentation versioning system with separate v0 and v1 documentation trees, version selector in the UI, proper URL redirects for unversioned docs, and improved navigation for users working with different Cozystack versions ([**@IvanStukov**](https://github.com/IvanStukov) in [cozystack/website#415](https://github.com/cozystack/website/pull/415)).
* **[website] Describe upgrade to v1.0**: Added detailed upgrade instructions for migrating from v0.x to v1.0, including prerequisites, upgrade steps, and troubleshooting guidance ([**@nbykov0**](https://github.com/nbykov0) in [cozystack/website@21bbe84](https://github.com/cozystack/website/commit/21bbe84)).
* **[website] Update support documentation**: Updated support documentation with current contact information and support channels ([**@xrmtech-isk**](https://github.com/xrmtech-isk) in [cozystack/website#420](https://github.com/cozystack/website/pull/420)).
---
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@IvanStukov**](https://github.com/IvanStukov)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@lllamnyp**](https://github.com/lllamnyp)
* [**@nbykov0**](https://github.com/nbykov0)
* [**@sircthulhu**](https://github.com/sircthulhu)
* [**@xrmtech-isk**](https://github.com/xrmtech-isk)
### New Contributors
We're excited to welcome our first-time contributors:
* [**@IvanStukov**](https://github.com/IvanStukov) - First contribution!
* [**@xrmtech-isk**](https://github.com/xrmtech-isk) - First contribution!
---
**Full Changelog**: [v1.0.0-beta.3...v1.0.0-beta.4](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.3...v1.0.0-beta.4)

View File

@@ -1,36 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.5
-->
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Features and Improvements
* **[installer] Add variant-aware templates for generic Kubernetes support**: Extended the installer chart to support generic and hosted Kubernetes deployments via the existing `cozystackOperator.variant` parameter. When using `variant=generic`, the installer now renders separate templates for the Cozystack operator, skipping Talos-specific components. This enables users to deploy Cozystack on standard Kubernetes distributions and hosted Kubernetes services, expanding platform compatibility beyond Talos Linux ([**@lexfrei**](https://github.com/lexfrei) in #2010).
* **[kilo] Add Cilium compatibility variant**: Added a new `cilium` variant to the kilo PackageSource that deploys kilo with the `--compatibility=cilium` flag. This enables Cilium-aware IPIP encapsulation where the outer packet IP matches the inner packet source, allowing Cilium's network policies to function correctly with kilo's WireGuard mesh networking. Users can now run kilo alongside Cilium CNI while maintaining full network policy enforcement capabilities ([**@kvaps**](https://github.com/kvaps) in #2055).
* **[cluster-autoscaler] Enable enforce-node-group-min-size by default**: Enabled the `enforce-node-group-min-size` option for the system cluster-autoscaler chart. This ensures node groups are always scaled up to their configured minimum size, even when current workload demands are lower, preventing unexpected scale-down below minimum thresholds and improving cluster stability for production workloads ([**@kvaps**](https://github.com/kvaps) in #2050).
* **[dashboard] Upgrade dashboard to version 1.4.0**: Updated the Cozystack dashboard to version 1.4.0 with new features and improvements for better user experience and cluster management capabilities ([**@sircthulhu**](https://github.com/sircthulhu) in #2051).
## Breaking Changes & Upgrade Notes
* **[vpc] Migrate subnets definition from map to array format**: Migrated VPC subnets definition from map format (`map[string]Subnet`) to array format (`[]Subnet`) with an explicit `name` field. This aligns VPC subnet definitions with the vm-instance `networks` field pattern and provides more intuitive configuration. Existing VPC deployments are automatically migrated via migration 30, which converts the subnet map to an array while preserving all existing subnet configurations and network connectivity ([**@kvaps**](https://github.com/kvaps) in #2052).
## Dependencies
* **[kilo] Update to v0.8.0**: Updated Kilo WireGuard mesh networking to v0.8.0 with performance improvements, bug fixes, and new compatibility features ([**@kvaps**](https://github.com/kvaps) in #2053).
* **[talm] Skip config loading for __complete command**: Fixed CLI completion behavior by skipping config loading for the `__complete` command, preventing errors during shell completion when configuration files are not available or misconfigured ([**@kitsunoff**](https://github.com/kitsunoff) in [cozystack/talm#109](https://github.com/cozystack/talm/pull/109)).
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@kitsunoff**](https://github.com/kitsunoff)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@sircthulhu**](https://github.com/sircthulhu)
**Full Changelog**: [v1.0.0-beta.4...v1.0.0-beta.5](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.4...v1.0.0-beta.5)

View File

@@ -1,46 +0,0 @@
<!--
https://github.com/cozystack/cozystack/releases/tag/v1.0.0-beta.6
-->
> **⚠️ Beta Release Warning**: This is a pre-release version intended for testing and early adoption. Breaking changes may occur before the stable v1.0.0 release.
## Features and Improvements
* **[platform] Add cilium-kilo networking variant**: Added a new `cilium-kilo` networking variant that combines Cilium CNI with Kilo WireGuard mesh overlay. This variant enables `enable-ipip-termination` in Cilium for proper IPIP packet handling and deploys Kilo with `--compatibility=cilium` flag. Users can now select `cilium-kilo` as their networking variant during platform setup, simplifying the multi-location WireGuard setup compared to manually combining Cilium and standalone Kilo ([**@kvaps**](https://github.com/kvaps) in #2064).
* **[nats] Add monitoring**: Added Grafana dashboards for NATS JetStream and server metrics monitoring, along with Prometheus monitoring support with TLS-aware endpoint configuration. Includes updated image customization options (digest and full image name) and component version upgrades for the NATS exporter and utilities. Users now have full observability into NATS message broker performance and health ([**@klinch0**](https://github.com/klinch0) in #1381).
* **[platform] Add DNS-1035 validation for Application names**: Added dynamic DNS-1035 label validation for Application names in the Cozystack API, using `IsDNS1035Label` from `k8s.io/apimachinery`. Validation is performed at creation time and accounts for the root host length to prevent names that would exceed Kubernetes resource naming limits. This prevents creation of resources with invalid names that would fail downstream Kubernetes resource creation ([**@lexfrei**](https://github.com/lexfrei) in #1771).
* **[operator] Add automatic CRD installation at startup**: Added `--install-crds` flag to the Cozystack operator that installs embedded CRD manifests at startup, ensuring CRDs exist before the operator begins reconciliation. CRD manifests are now embedded in the operator binary and verified for consistency with the Helm `crds/` directory via a new CI Makefile check. This eliminates ordering issues during initial cluster setup where CRDs might not yet be present ([**@lexfrei**](https://github.com/lexfrei) in #2060).
## Fixes
* **[platform] Adopt tenant-root into cozystack-basics during migration**: Added migration 31 to adopt existing `tenant-root` Namespace and HelmRelease into the `cozystack-basics` Helm release when upgrading from v0.41.x to v1.0. Previously these resources were applied via `kubectl apply` with no Helm release tracking, causing Helm to treat them as foreign resources and potentially delete them during reconciliation. This migration ensures a safe upgrade path by annotating and labeling these resources for Helm adoption ([**@kvaps**](https://github.com/kvaps) in #2065).
* **[platform] Preserve tenant-root HelmRelease during migration**: Fixed a data-loss risk during migration from v0.41.x to v1.0.0-beta where the `tenant-root` HelmRelease (and the namespace it manages) could be deleted, causing tenant service outages. Added safety annotation to the HelmRelease and lookup logic to preserve current parameters during migration, preventing unwanted deletion of tenant-root resources ([**@sircthulhu**](https://github.com/sircthulhu) in #2063).
* **[codegen] Add gen_client to update-codegen.sh and regenerate applyconfiguration**: Fixed a build error in `pkg/generated/applyconfiguration/utils.go` caused by a reference to `testing.TypeConverter` which was removed in client-go v0.34.1. The root cause was that `hack/update-codegen.sh` never called `gen_client`, leaving the generated applyconfiguration code stale. Running the full code generation now produces a consistent and compilable codebase ([**@lexfrei**](https://github.com/lexfrei) in #2061).
* **[e2e] Make kubernetes test retries effective by cleaning up stale resources**: Fixed E2E test retries for the Kubernetes tenant test by adding pre-creation cleanup of backend deployment/service and NFS pod/PVC in `run-kubernetes.sh`. Previously, retries would fail immediately because stale resources from a failed attempt blocked re-creation. Also increased the tenant deployment wait timeout from 90s to 300s to handle CI resource pressure ([**@lexfrei**](https://github.com/lexfrei) in #2062).
## Development, Testing, and CI/CD
* **[e2e] Use helm install instead of kubectl apply for cozystack installation**: Replaced the pre-rendered static YAML application flow (`kubectl apply`) with direct `helm upgrade --install` of the `packages/core/installer` chart in E2E tests. Removed the CRD/operator artifact upload/download steps from the CI workflow, simplifying the pipeline. The chart with correct values is already present in the sandbox via workspace copy and `pr.patch` ([**@lexfrei**](https://github.com/lexfrei) in #2060).
## Documentation
* **[website] Improve Azure autoscaling troubleshooting guide**: Enhanced the Azure autoscaling troubleshooting documentation with serial console instructions for debugging VMSS worker nodes, a troubleshooting section for nodes stuck in maintenance mode due to invalid or missing machine config, `az vmss update --custom-data` instructions for updating machine config, and a warning that Azure does not support reading back `customData` ([**@kvaps**](https://github.com/kvaps) in [cozystack/website#424](https://github.com/cozystack/website/pull/424)).
* **[website] Update multi-location documentation for cilium-kilo variant**: Updated multi-location networking documentation to reflect the new integrated `cilium-kilo` variant selection during platform setup, replacing the previous manual Kilo installation and Cilium configuration steps. Added explanation of `enable-ipip-termination` and updated the troubleshooting section ([**@kvaps**](https://github.com/kvaps) in [cozystack/website@02d63f0](https://github.com/cozystack/website/commit/02d63f0)).
## Contributors
We'd like to thank all contributors who made this release possible:
* [**@klinch0**](https://github.com/klinch0)
* [**@kvaps**](https://github.com/kvaps)
* [**@lexfrei**](https://github.com/lexfrei)
* [**@sircthulhu**](https://github.com/sircthulhu)
**Full Changelog**: [v1.0.0-beta.5...v1.0.0-beta.6](https://github.com/cozystack/cozystack/compare/v1.0.0-beta.5...v1.0.0-beta.6)

View File

@@ -1,99 +0,0 @@
# Enabling Hubble for Network Observability
Hubble is a network and security observability platform built on top of Cilium. It provides deep visibility into the communication and behavior of services in your Kubernetes cluster.
## Prerequisites
- Cozystack platform running with Cilium as the CNI
- Monitoring hub enabled for Grafana access
## Configuration
Hubble is disabled by default in Cozystack. To enable it, update the Cilium configuration.
### Enable Hubble
Edit the Cilium values in your platform configuration to enable Hubble:
```yaml
cilium:
  hubble:
    enabled: true
    relay:
      enabled: true
    ui:
      enabled: true
    metrics:
      enabled:
        - dns
        - drop
        - tcp
        - flow
        - port-distribution
        - icmp
        - httpV2:exemplars=true;labelsContext=source_ip,source_namespace,source_workload,destination_ip,destination_namespace,destination_workload,traffic_direction
```
### Components
When Hubble is enabled, the following components become available:
- **Hubble Relay**: Aggregates flow data from all Cilium agents
- **Hubble UI**: Web-based interface for exploring network flows
- **Hubble Metrics**: Prometheus metrics for network observability
## Grafana Dashboards
Once Hubble is enabled and the monitoring hub is deployed, the following dashboards become available in Grafana under the `hubble` folder:
| Dashboard | Description |
|-----------|-------------|
| **Overview** | General Hubble metrics including processing statistics |
| **DNS Namespace** | DNS query and response metrics by namespace |
| **L7 HTTP Metrics** | HTTP layer 7 metrics by workload |
| **Network Overview** | Network flow overview by namespace |
### Accessing Dashboards
1. Navigate to Grafana via the monitoring hub
2. Browse to the `hubble` folder in the dashboard browser
3. Select a dashboard to view network observability data
## Metrics Available
Hubble exposes various metrics that can be queried in Grafana:
- `hubble_flows_processed_total`: Total number of flows processed
- `hubble_dns_queries_total`: DNS queries by type
- `hubble_dns_responses_total`: DNS responses by status
- `hubble_drop_total`: Dropped packets by reason
- `hubble_tcp_flags_total`: TCP connections by flag
- `hubble_http_requests_total`: HTTP requests by method and status
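As a quick illustration of how these metrics are typically consumed, here is a small Go program that runs a PromQL query against a Prometheus-compatible endpoint. This is a sketch, not part of the Hubble setup: the `localhost:9090` URL assumes you have port-forwarded your monitoring stack's query API, and the exact label set on `hubble_drop_total` may vary by Cilium version.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Illustrative only: assumes `kubectl port-forward` has exposed a
	// Prometheus-compatible query API on localhost:9090.
	endpoint := "http://localhost:9090/api/v1/query"
	query := url.Values{}
	// Per-reason rate of dropped packets over the last 5 minutes.
	query.Set("query", `sum(rate(hubble_drop_total[5m])) by (reason)`)

	resp, err := http.Get(endpoint + "?" + query.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // raw JSON result vector
}
```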
## Troubleshooting
### Verify Hubble Status
Check if Hubble is running:
```bash
kubectl get pods -n cozy-cilium -l k8s-app=hubble-relay
kubectl get pods -n cozy-cilium -l k8s-app=hubble-ui
```
### Check Metrics Endpoint
Verify Hubble metrics are being scraped:
```bash
kubectl port-forward -n cozy-cilium svc/hubble-metrics 9965:9965
curl http://localhost:9965/metrics
```
### Verify ServiceMonitor
Ensure the ServiceMonitor is created for Prometheus scraping:
```bash
kubectl get servicemonitor -n cozy-cilium
```

View File

@@ -1,356 +0,0 @@
# AffinityClass: Named Placement Classes for CozyStack Applications (Draft)
## Concept
Similar to StorageClass in Kubernetes, a new resource **AffinityClass** is introduced — a named abstraction over scheduling constraints. When creating an Application, the user selects an AffinityClass by name without knowing the details of the cluster topology.
```
StorageClass  → "which disk"     → PV provisioning
AffinityClass → "where to place" → Pod scheduling
```
## Design
### 1. AffinityClass CRD
A cluster-scoped resource created by the platform administrator:
```yaml
apiVersion: cozystack.io/v1alpha1
kind: AffinityClass
metadata:
  name: dc1
spec:
  # nodeSelector that MUST be present on every pod of the application.
  # Used for validation by the lineage webhook.
  nodeSelector:
    topology.kubernetes.io/zone: dc1
```
```yaml
apiVersion: cozystack.io/v1alpha1
kind: AffinityClass
metadata:
  name: dc2
spec:
  nodeSelector:
    topology.kubernetes.io/zone: dc2
```
```yaml
apiVersion: cozystack.io/v1alpha1
kind: AffinityClass
metadata:
  name: gpu
spec:
  nodeSelector:
    node.kubernetes.io/gpu: "true"
```
An AffinityClass contains a `nodeSelector` — a set of key=value pairs that must be present in `pod.spec.nodeSelector` on every pod of the application. This is a contract: the chart is responsible for setting these selectors, and the webhook for verifying them.
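For orientation, here is a minimal sketch of the Go types this CRD implies for `api/v1alpha1/affinityclass_types.go` (listed under New Files below); the kubebuilder markers and JSON tags are assumptions based on the YAML shape above:

```go
// Package v1alpha1 contains the AffinityClass API types (sketch).
package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster

// AffinityClass is a named abstraction over scheduling constraints.
type AffinityClass struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec AffinityClassSpec `json:"spec,omitempty"`
}

// AffinityClassSpec holds the scheduling contract.
type AffinityClassSpec struct {
    // NodeSelector must be present on every pod of the application.
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}

// +kubebuilder:object:root=true

// AffinityClassList contains a list of AffinityClass.
type AffinityClassList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []AffinityClass `json:"items"`
}
```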
### 2. Tenant: Restricting Available Classes
The Tenant resource gains `allowedAffinityClasses` and `defaultAffinityClass` fields:
```yaml
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: acme
  namespace: tenant-root
spec:
  defaultAffinityClass: dc1   # default class for applications
  allowedAffinityClasses:     # which classes are allowed
    - dc1
    - dc2
  etcd: false
  ingress: true
  monitoring: false
```
These values are propagated to the `cozystack-values` Secret in the child namespace:
```yaml
# Secret cozystack-values in namespace tenant-acme
stringData:
  values.yaml: |
    _cluster:
      # ... existing cluster config
    _namespace:
      # ... existing namespace config
      defaultAffinityClass: dc1
      allowedAffinityClasses:
        - dc1
        - dc2
```
### 3. Application: Selecting a Class
Each application can specify an `affinityClass`. If not specified, the `defaultAffinityClass` from the tenant is used:
```yaml
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: main-db
  namespace: tenant-acme
spec:
  affinityClass: dc1   # explicit selection
  replicas: 3
```
```yaml
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
metadata:
  name: cache
  namespace: tenant-acme
spec:
  # affinityClass not specified → uses tenant's defaultAffinityClass (dc1)
  replicas: 2
```
### 4. How affinityClass Reaches the HelmRelease
When creating an Application, the API server (`pkg/registry/apps/application/rest.go`):
1. Extracts `affinityClass` from `spec` (or uses the default from `cozystack-values`)
2. Records `affinityClass` as a **label on the HelmRelease**:
```
apps.cozystack.io/affinity-class: dc1
```
3. Resolves AffinityClass to `nodeSelector` and passes it into HelmRelease values as `_scheduling`:
```yaml
_scheduling:
  affinityClass: dc1
  nodeSelector:
    topology.kubernetes.io/zone: dc1
```
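A sketch of how steps 1–3 might look inside the REST handler; `resolveAffinityClass` and its signature are hypothetical names introduced here for illustration, and only the label key and `_scheduling` shape are taken from this document:

```go
package application

import (
    "fmt"
    "slices"
)

// resolveAffinityClass validates the requested class against the tenant's
// allow-list and returns the HelmRelease label value plus the _scheduling
// values to inject. Hypothetical sketch: not the actual codebase API.
func resolveAffinityClass(
    requested, tenantDefault string,
    allowed []string,
    classes map[string]map[string]string, // class name -> nodeSelector
) (label string, values map[string]any, err error) {
    name := requested
    if name == "" {
        name = tenantDefault // fall back to the tenant's default class
    }
    if name == "" {
        return "", nil, nil // nothing requested, nothing to inject
    }
    if !slices.Contains(allowed, name) {
        return "", nil, fmt.Errorf("affinityClass %q is not in allowedAffinityClasses", name)
    }
    nodeSelector, ok := classes[name]
    if !ok {
        return "", nil, fmt.Errorf("AffinityClass %q not found", name)
    }
    values = map[string]any{
        "_scheduling": map[string]any{
            "affinityClass": name,
            "nodeSelector":  nodeSelector,
        },
    }
    return name, values, nil
}
```

Whether this allow-list check belongs in the REST handler or in a separate validating webhook is open question 2 below.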
### 5. How Charts Apply Scheduling
A helper is added to `cozy-lib`:
```yaml
{{- define "cozy-lib.scheduling.nodeSelector" -}}
{{- if .Values._scheduling }}
{{- if .Values._scheduling.nodeSelector }}
nodeSelector:
{{- .Values._scheduling.nodeSelector | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- end -}}
```
Each app chart uses the helper when rendering Pod/StatefulSet/Deployment specs:
```yaml
# packages/apps/postgres/templates/db.yaml
spec:
  instances: {{ .Values.replicas }}
  {{- include "cozy-lib.scheduling.nodeSelector" . | nindent 2 }}
```
```yaml
# packages/apps/redis/templates/redis.yaml
spec:
  replicas: {{ .Values.replicas }}
  template:
    spec:
      {{- include "cozy-lib.scheduling.nodeSelector" . | nindent 6 }}
```
Charts **must** apply `_scheduling.nodeSelector`. If they don't, pods will be rejected by the webhook.
---
## Validation via Lineage Webhook
### Why Validation, Not Mutation
Mutation (injecting nodeSelector into a pod) creates problems:
- Requires merging with existing pod nodeSelector/affinity — complex logic with edge cases
- Operators (CNPG, Strimzi) may overwrite nodeSelector on pod restart
- Hidden behavior: pod is created with one spec but actually runs with another
Validation is simpler and more reliable:
- Webhook checks: "does this pod **have** the required nodeSelector?"
- If not, the pod is **rejected** with a clear error message
- The chart and operator are responsible for setting the correct spec
### What Already Exists in the Lineage Webhook
On every Pod creation, the lineage webhook (`internal/lineagecontrollerwebhook/webhook.go`):
1. Decodes the Pod
2. Walks the ownership graph (`lineage.WalkOwnershipGraph`) — finds the **owning HelmRelease**
3. Extracts labels from the HelmRelease: `apps.cozystack.io/application.kind`, `.group`, `.name`
4. Applies these labels to the Pod
**Key point:** the webhook already knows which HelmRelease owns each Pod.
### What Is Added
After computing lineage labels, a validation step is added:
```
Handle(pod):
  1. [existing] computeLabels(pod)        → finds owning HelmRelease
  2. [existing] applyLabels(pod, labels)  → mutates labels
  3. [NEW]      validateAffinity(pod, hr) → checks nodeSelector
  4. Return patch or Denied
```
The `validateAffinity` logic:
```go
func (h *LineageControllerWebhook) validateAffinity(
    ctx context.Context,
    pod *unstructured.Unstructured,
    hr *helmv2.HelmRelease,
) *admission.Response {
    // 1. Extract affinityClass from HelmRelease label
    affinityClassName, ok := hr.Labels["apps.cozystack.io/affinity-class"]
    if !ok {
        return nil // no affinityClass — no validation needed
    }

    // 2. Look up AffinityClass from cache
    affinityClass, ok := h.affinityClassMap[affinityClassName]
    if !ok {
        resp := admission.Denied(fmt.Sprintf(
            "AffinityClass %q not found", affinityClassName))
        return &resp
    }

    // 3. Check pod's nodeSelector
    podNodeSelector := extractNodeSelector(pod) // from pod.spec.nodeSelector
    for key, expected := range affinityClass.Spec.NodeSelector {
        actual, exists := podNodeSelector[key]
        if !exists || actual != expected {
            resp := admission.Denied(fmt.Sprintf(
                "pod %s/%s belongs to application with AffinityClass %q "+
                    "but missing required nodeSelector %s=%s",
                pod.GetNamespace(), pod.GetName(),
                affinityClassName, key, expected))
            return &resp
        }
    }
    return nil // validation passed
}
```
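The `extractNodeSelector` helper used above is not defined in this draft; a minimal sketch using the unstructured accessors from `k8s.io/apimachinery` could be:

```go
import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// extractNodeSelector reads pod.spec.nodeSelector from an unstructured Pod.
// Sketch only: errors and absent fields collapse to an empty map.
func extractNodeSelector(pod *unstructured.Unstructured) map[string]string {
    selector, found, err := unstructured.NestedStringMap(pod.Object, "spec", "nodeSelector")
    if err != nil || !found {
        return map[string]string{}
    }
    return selector
}
```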
### AffinityClass Caching
The lineage webhook controller already caches ApplicationDefinitions (`runtimeConfig.appCRDMap`). An AffinityClass cache is added in the same way:
```go
type runtimeConfig struct {
    appCRDMap        map[appRef]*cozyv1alpha1.ApplicationDefinition
    affinityClassMap map[string]*cozyv1alpha1.AffinityClass // NEW
}
```
The controller adds a watch on AffinityClass:
```go
func (c *LineageControllerWebhook) SetupWithManagerAsController(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&cozyv1alpha1.ApplicationDefinition{}).
        Watches(&cozyv1alpha1.AffinityClass{}, &handler.EnqueueRequestForObject{}).
        Complete(c)
}
```
When an AffinityClass changes, the cache is rebuilt.
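A sketch of that rebuild, assuming the controller holds the manager's cached client (names are illustrative, and the list type matches the CRD sketch above):

```go
// rebuildAffinityClassMap refreshes the webhook's AffinityClass cache.
// Illustrative sketch; the real reconciler may rebuild appCRDMap in the same pass.
func (c *LineageControllerWebhook) rebuildAffinityClassMap(ctx context.Context) error {
    var list cozyv1alpha1.AffinityClassList
    if err := c.Client.List(ctx, &list); err != nil {
        return err
    }
    m := make(map[string]*cozyv1alpha1.AffinityClass, len(list.Items))
    for i := range list.Items {
        m[list.Items[i].Name] = &list.Items[i]
    }
    c.affinityClassMap = m // swap in the fresh cache
    return nil
}
```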
---
## End-to-End Flow
```
1. Admin creates AffinityClass "dc1" (nodeSelector: zone=dc1)
2. Admin creates Tenant "acme" (defaultAffinityClass: dc1, allowed: [dc1, dc2])
   → namespace tenant-acme
   → cozystack-values Secret with defaultAffinityClass
3. User creates Postgres "main-db" (affinityClass: dc1)
   → API server checks: dc1 ∈ allowedAffinityClasses? ✓
   → API server resolves AffinityClass → nodeSelector
   → HelmRelease is created with:
     - label: apps.cozystack.io/affinity-class=dc1
     - values: _scheduling.nodeSelector.topology.kubernetes.io/zone=dc1
4. FluxCD deploys HelmRelease → Helm renders the chart
   → Chart uses cozy-lib helper
   → CNPG Cluster is created with nodeSelector: {zone: dc1}
5. CNPG operator creates Pod
   → Pod has nodeSelector: {zone: dc1}
6. Lineage webhook intercepts the Pod:
   a. WalkOwnershipGraph → finds HelmRelease "main-db"
   b. HelmRelease label → affinityClass=dc1
   c. AffinityClass "dc1" → nodeSelector: {zone: dc1}
   d. Checks: pod.spec.nodeSelector contains zone=dc1? ✓
   e. Admits Pod (+ standard lineage labels)
7. Scheduler places the Pod on a node in dc1
```
### Error Scenario (chart forgot to apply nodeSelector)
```
5. CNPG operator creates Pod WITHOUT nodeSelector
6. Lineage webhook:
   d. Checks: pod.spec.nodeSelector contains zone=dc1? ✗
   e. REJECTS Pod:
      "pod main-db-1 belongs to application with AffinityClass dc1
       but missing required nodeSelector topology.kubernetes.io/zone=dc1"
7. Pod is not created. CNPG operator sees the error and retries.
   → Chart developer gets a signal that the chart does not support scheduling.
```
---
## Code Changes
### New Files
| File | Description |
|------------------------------------------------------|-------------------------|
| `api/v1alpha1/affinityclass_types.go` | AffinityClass CRD types |
| `config/crd/bases/cozystack.io_affinityclasses.yaml` | CRD manifest |
### Modified Files
| File | Change |
|-------------------------------------------------------|-------------------------------------------------------------------|
| `internal/lineagecontrollerwebhook/webhook.go` | Add `validateAffinity()` to `Handle()` |
| `internal/lineagecontrollerwebhook/config.go` | Add `affinityClassMap` to `runtimeConfig` |
| `internal/lineagecontrollerwebhook/controller.go` | Add watch on AffinityClass |
| `pkg/registry/apps/application/rest.go` | On Create/Update: resolve affinityClass, pass to values and label |
| `packages/apps/tenant/values.yaml` | Add `defaultAffinityClass`, `allowedAffinityClasses` |
| `packages/apps/tenant/templates/namespace.yaml` | Propagate to cozystack-values |
| `packages/system/tenant-rd/cozyrds/tenant.yaml` | Extend OpenAPI schema |
| `packages/library/cozy-lib/templates/_cozyconfig.tpl` | Add `cozy-lib.scheduling.nodeSelector` helper |
| `packages/apps/*/templates/*.yaml` | Each app chart: add helper usage |
---
## Open Questions
1. **AffinityClass outside Tenants**: Should AffinityClass work for applications outside tenant namespaces (system namespace)? Or only for tenant workloads?
2. **affinityClass validation on Application creation**: The API server should verify that the specified affinityClass exists and is included in the tenant's `allowedAffinityClasses`. Where should this be done — in the REST handler (`rest.go`) or in a separate validating webhook?
3. **Soft mode (warn vs deny)**: Is a mode needed where the webhook issues a warning instead of rejecting? This would simplify gradual adoption while not all charts support `_scheduling`.
4. **affinityClass inheritance**: If a child Tenant does not specify `defaultAffinityClass`, should it be inherited from the parent? The current `cozystack-values` architecture supports this inheritance natively.
5. **Multiple nodeSelectors**: Is OR-logic support needed (pod can be in dc1 OR dc2)? With `nodeSelector` this is impossible — AffinityClass would need to be extended to `nodeAffinity`. However, validation becomes significantly more complex.

4
go.mod
View File

@@ -19,6 +19,7 @@ require (
github.com/spf13/cobra v1.9.1
github.com/vmware-tanzu/velero v1.17.1
go.uber.org/zap v1.27.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.34.1
k8s.io/apiextensions-apiserver v0.34.1
k8s.io/apimachinery v0.34.2
@@ -29,7 +30,7 @@ require (
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/structured-merge-diff/v6 v6.3.0
sigs.k8s.io/structured-merge-diff/v4 v4.7.0
)
require (
@@ -125,6 +126,7 @@ require (
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)

7
go.sum
View File

@@ -81,6 +81,7 @@ github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -296,6 +297,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -323,9 +326,13 @@ sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327U
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

View File

@@ -56,33 +56,13 @@ kubectl get hr -A --no-headers | awk '$4 != "True"' | \
kubectl describe hr -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting packages..."
kubectl get packages > $REPORT_DIR/kubernetes/packages.txt 2>&1
kubectl get packages --no-headers | awk '$3 != "True"' | \
while read NAME _; do
DIR=$REPORT_DIR/kubernetes/packages/$NAME
mkdir -p $DIR
kubectl get package $NAME -o yaml > $DIR/package.yaml 2>&1
kubectl describe package $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting packagesources..."
kubectl get packagesources > $REPORT_DIR/kubernetes/packagesources.txt 2>&1
kubectl get packagesources --no-headers | awk '$3 != "True"' | \
while read NAME _; do
DIR=$REPORT_DIR/kubernetes/packagesources/$NAME
mkdir -p $DIR
kubectl get packagesource $NAME -o yaml > $DIR/packagesource.yaml 2>&1
kubectl describe packagesource $NAME > $DIR/describe.txt 2>&1
done
echo "Collecting pods..."
kubectl get pod -A -o wide > $REPORT_DIR/kubernetes/pods.txt 2>&1
kubectl get pod -A --no-headers | awk '$4 !~ /Running|Succeeded|Completed/' |
while read NAMESPACE NAME _ STATE _; do
DIR=$REPORT_DIR/kubernetes/pods/$NAMESPACE/$NAME
mkdir -p $DIR
CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name} {.spec.initContainers[*].name}' -n $NAMESPACE $NAME)
CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name}' -n $NAMESPACE $NAME)
kubectl get pod -n $NAMESPACE $NAME -o yaml > $DIR/pod.yaml 2>&1
kubectl describe pod -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
if [ "$STATE" != "Pending" ]; then

View File

@@ -83,8 +83,6 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//seaweedfs/seaweedfs.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//nats/nats-jetstream.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//nats/nats-server.json
EOT

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bats
@test "Create DB FerretDB" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: FerretDB
metadata:
name: $name
namespace: tenant-test
spec:
backup:
destinationPath: "s3://bucket/path/to/folder/"
enabled: false
endpointURL: "http://minio-gateway-service:9000"
retentionPolicy: "30d"
s3AccessKey: "<your-access-key>"
s3SecretKey: "<your-secret-key>"
schedule: "0 2 * * * *"
bootstrap:
enabled: false
external: false
quorum:
maxSyncReplicas: 0
minSyncReplicas: 0
replicas: 2
resources: {}
resourcesPreset: "micro"
size: "10Gi"
users:
testuser:
password: xai7Wepo
EOF
sleep 5
kubectl -n tenant-test wait hr ferretdb-$name --timeout=100s --for=condition=ready
timeout 40 sh -ec "until kubectl -n tenant-test get svc ferretdb-$name-postgres-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc ferretdb-$name-postgres-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get svc ferretdb-$name-postgres-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints ferretdb-$name-postgres-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
# for some reason it takes longer for the read-only endpoint to be ready
#timeout 120 sh -ec "until kubectl -n tenant-test get endpoints ferretdb-$name-postgres-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
timeout 120 sh -ec "until kubectl -n tenant-test get endpoints ferretdb-$name-postgres-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete ferretdb.apps.cozystack.io $name
}

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env bats
@test "Create Harbor" {
name='test'
release="harbor-$name"
# Clean up stale resources from previous failed runs
kubectl -n tenant-test delete harbor.apps.cozystack.io $name 2>/dev/null || true
kubectl -n tenant-test wait hr $release --timeout=60s --for=delete 2>/dev/null || true
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Harbor
metadata:
name: $name
namespace: tenant-test
spec:
host: ""
storageClass: ""
core:
resources: {}
resourcesPreset: "nano"
registry:
resources: {}
resourcesPreset: "nano"
jobservice:
resources: {}
resourcesPreset: "nano"
trivy:
enabled: false
size: 2Gi
resources: {}
resourcesPreset: "nano"
database:
size: 2Gi
replicas: 1
redis:
size: 1Gi
replicas: 1
EOF
sleep 5
kubectl -n tenant-test wait hr $release --timeout=60s --for=condition=ready
# Wait for COSI to provision bucket
kubectl -n tenant-test wait bucketclaims.objectstorage.k8s.io $release-registry \
--timeout=300s --for=jsonpath='{.status.bucketReady}'=true
kubectl -n tenant-test wait bucketaccesses.objectstorage.k8s.io $release-registry \
--timeout=60s --for=jsonpath='{.status.accessGranted}'=true
kubectl -n tenant-test wait hr $release-system --timeout=600s --for=condition=ready || {
echo "=== HelmRelease status ==="
kubectl -n tenant-test get hr $release-system -o yaml 2>&1 || true
echo "=== Pods ==="
kubectl -n tenant-test get pods 2>&1 || true
echo "=== Events ==="
kubectl -n tenant-test get events --sort-by='.lastTimestamp' 2>&1 | tail -30 || true
echo "=== ExternalArtifact ==="
kubectl -n cozy-system get externalartifact cozystack-harbor-application-default-harbor-system -o yaml 2>&1 || true
echo "=== BucketClaim status ==="
kubectl -n tenant-test get bucketclaims.objectstorage.k8s.io $release-registry -o yaml 2>&1 || true
echo "=== BucketAccess status ==="
kubectl -n tenant-test get bucketaccesses.objectstorage.k8s.io $release-registry -o yaml 2>&1 || true
echo "=== BucketAccess Secret ==="
kubectl -n tenant-test get secret $release-registry-bucket -o jsonpath='{.data.BucketInfo}' 2>&1 | base64 -d 2>&1 || true
false
}
kubectl -n tenant-test wait deploy $release-core --timeout=120s --for=condition=available
kubectl -n tenant-test wait deploy $release-registry --timeout=120s --for=condition=available
kubectl -n tenant-test wait deploy $release-portal --timeout=120s --for=condition=available
kubectl -n tenant-test get secret $release-credentials -o jsonpath='{.data.admin-password}' | base64 --decode | grep -q '.'
kubectl -n tenant-test get secret $release-credentials -o jsonpath='{.data.url}' | base64 --decode | grep -q 'https://'
kubectl -n tenant-test get svc $release -o jsonpath='{.spec.ports[0].port}' | grep -q '80'
kubectl -n tenant-test delete harbor.apps.cozystack.io $name
}

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bats
@test "Create DB MariaDB" {
name='test'
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MariaDB
metadata:
name: $name
namespace: tenant-test
spec:
external: false
size: 10Gi
replicas: 2
storageClass: ""
users:
testuser:
maxUserConnections: 1000
password: xai7Wepo
databases:
testdb:
roles:
admin:
- testuser
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/mariadb-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr mariadb-$name --timeout=30s --for=condition=ready
timeout 80 sh -ec "until kubectl -n tenant-test get svc mariadb-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mariadb-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/mariadb-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
timeout 80 sh -ec "until kubectl -n tenant-test get svc mariadb-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mariadb-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait deployment.apps/mariadb-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test delete mariadbs.apps.cozystack.io $name
}

View File

@@ -1,39 +0,0 @@
#!/usr/bin/env bats
@test "Create DB MongoDB" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MongoDB
metadata:
name: $name
namespace: tenant-test
spec:
external: false
size: 10Gi
replicas: 1
storageClass: ""
resourcesPreset: "small"
users:
testuser:
password: xai7Wepo
databases:
testdb:
roles:
admin:
- testuser
backup:
enabled: false
EOF
sleep 5
# Wait for HelmRelease
kubectl -n tenant-test wait hr mongodb-$name --timeout=60s --for=condition=ready
# Wait for MongoDB service (port 27017)
timeout 120 sh -ec "until kubectl -n tenant-test get svc mongodb-$name-rs0 -o jsonpath='{.spec.ports[0].port}' | grep -q '27017'; do sleep 10; done"
# Wait for endpoints
timeout 180 sh -ec "until kubectl -n tenant-test get endpoints mongodb-$name-rs0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
# Wait for StatefulSet replicas
kubectl -n tenant-test wait statefulset.apps/mongodb-$name-rs0 --timeout=300s --for=jsonpath='{.status.replicas}'=1
# Cleanup
kubectl -n tenant-test delete mongodbs.apps.cozystack.io $name
}

46
hack/e2e-apps/mysql.bats Normal file
View File

@@ -0,0 +1,46 @@
#!/usr/bin/env bats
@test "Create DB MySQL" {
name='test'
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
name: $name
namespace: tenant-test
spec:
external: false
size: 10Gi
replicas: 2
storageClass: ""
users:
testuser:
maxUserConnections: 1000
password: xai7Wepo
databases:
testdb:
roles:
admin:
- testuser
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
resources: {}
resourcesPreset: "nano"
EOF
sleep 5
kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env bats
@test "Create Qdrant" {
name='test'
kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Qdrant
metadata:
name: $name
namespace: tenant-test
spec:
replicas: 1
size: 10Gi
storageClass: ""
resourcesPreset: "nano"
resources: {}
external: false
EOF
sleep 5
kubectl -n tenant-test wait hr qdrant-$name --timeout=60s --for=condition=ready
kubectl -n tenant-test wait hr qdrant-$name-system --timeout=120s --for=condition=ready
kubectl -n tenant-test wait sts qdrant-$name --timeout=90s --for=jsonpath='{.status.readyReplicas}'=1
kubectl -n tenant-test wait pvc qdrant-storage-qdrant-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test delete qdrant.apps.cozystack.io $name
}

View File

@@ -56,7 +56,7 @@ spec:
gpus: []
instanceType: u1.medium
maxReplicas: 10
minReplicas: 2
minReplicas: 0
roles:
- ingress-nginx
storageClass: replicated
@@ -80,10 +80,10 @@ EOF
# Wait for the machine deployment to scale to 2 replicas (timeout after 1 minute)
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
# Get the admin kubeconfig and save it to a file
kubectl get secret kubernetes-${test_name}-admin-kubeconfig -ojsonpath='{.data.super-admin\.conf}' -n tenant-test | base64 -d > "tenantkubeconfig-${test_name}"
kubectl get secret kubernetes-${test_name}-admin-kubeconfig -ojsonpath='{.data.super-admin\.conf}' -n tenant-test | base64 -d > tenantkubeconfig-${test_name}
# Update the kubeconfig to use localhost for the API server
yq -i ".clusters[0].cluster.server = \"https://localhost:${port}\"" "tenantkubeconfig-${test_name}"
yq -i ".clusters[0].cluster.server = \"https://localhost:${port}\"" tenantkubeconfig-${test_name}
# Set up port forwarding to the Kubernetes API server for a 200 second timeout
@@ -98,8 +98,8 @@ EOF
done
'
# Verify the nodes are ready
kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait node --all --timeout=2m --for=condition=Ready
kubectl --kubeconfig "tenantkubeconfig-${test_name}" get nodes -o wide
kubectl --kubeconfig tenantkubeconfig-${test_name} wait node --all --timeout=2m --for=condition=Ready
kubectl --kubeconfig tenantkubeconfig-${test_name} get nodes -o wide
# Verify the kubelet version matches what we expect
versions=$(kubectl --kubeconfig "tenantkubeconfig-${test_name}" \
@@ -125,21 +125,15 @@ EOF
fi
kubectl --kubeconfig "tenantkubeconfig-${test_name}" apply -f - <<EOF
kubectl --kubeconfig tenantkubeconfig-${test_name} apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: tenant-test
EOF
# Clean up backend resources from any previous failed attempt
kubectl delete deployment --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" \
-n tenant-test --ignore-not-found --timeout=60s || true
kubectl delete service --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" \
-n tenant-test --ignore-not-found --timeout=60s || true
# Backend 1
kubectl apply --kubeconfig "tenantkubeconfig-${test_name}" -f- <<EOF
kubectl apply --kubeconfig tenantkubeconfig-${test_name} -f- <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -171,7 +165,7 @@ spec:
EOF
# LoadBalancer Service
kubectl apply --kubeconfig "tenantkubeconfig-${test_name}" -f- <<EOF
kubectl apply --kubeconfig tenantkubeconfig-${test_name} -f- <<EOF
apiVersion: v1
kind: Service
metadata:
@@ -188,7 +182,7 @@ spec:
EOF
# Wait for pods readiness
kubectl wait deployment --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" -n tenant-test --for=condition=Available --timeout=300s
kubectl wait deployment --kubeconfig tenantkubeconfig-${test_name} ${test_name}-backend -n tenant-test --for=condition=Available --timeout=90s
# Wait for LoadBalancer to be provisioned (IP or hostname)
timeout 90 sh -ec "
@@ -199,7 +193,7 @@ EOF
"
LB_ADDR=$(
kubectl get svc --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" \
kubectl get svc --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" \
-n tenant-test \
-o jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}'
)
@@ -221,79 +215,15 @@ fi
fi
# Cleanup
kubectl delete deployment --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" -n tenant-test
kubectl delete service --kubeconfig "tenantkubeconfig-${test_name}" "${test_name}-backend" -n tenant-test
# Clean up NFS test resources from any previous failed attempt
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pod nfs-test-pod \
-n tenant-test --ignore-not-found --timeout=60s || true
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pvc nfs-test-pvc \
-n tenant-test --ignore-not-found --timeout=60s || true
# Test RWX NFS mount in tenant cluster (uses kubevirt CSI driver with RWX support)
kubectl --kubeconfig "tenantkubeconfig-${test_name}" apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-pvc
  namespace: tenant-test
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: kubevirt
  resources:
    requests:
      storage: 1Gi
EOF
# Wait for PVC to be bound
kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait pvc nfs-test-pvc -n tenant-test --timeout=2m --for=jsonpath='{.status.phase}'=Bound
# Create Pod that writes and reads data from NFS volume
kubectl --kubeconfig "tenantkubeconfig-${test_name}" apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nfs-test-pod
  namespace: tenant-test
spec:
  containers:
    - name: test
      image: busybox
      command: ["sh", "-c", "echo 'nfs-mount-ok' > /data/test.txt && cat /data/test.txt"]
      volumeMounts:
        - name: nfs-vol
          mountPath: /data
  volumes:
    - name: nfs-vol
      persistentVolumeClaim:
        claimName: nfs-test-pvc
  restartPolicy: Never
EOF
# Wait for Pod to complete successfully
kubectl --kubeconfig "tenantkubeconfig-${test_name}" wait pod nfs-test-pod -n tenant-test --timeout=5m --for=jsonpath='{.status.phase}'=Succeeded
# Verify NFS data integrity
nfs_result=$(kubectl --kubeconfig "tenantkubeconfig-${test_name}" logs nfs-test-pod -n tenant-test)
if [ "$nfs_result" != "nfs-mount-ok" ]; then
echo "NFS mount test failed: expected 'nfs-mount-ok', got '$nfs_result'" >&2
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pod nfs-test-pod -n tenant-test --wait=false 2>/dev/null || true
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pvc nfs-test-pvc -n tenant-test --wait=false 2>/dev/null || true
exit 1
fi
# Cleanup NFS test resources in tenant cluster
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pod nfs-test-pod -n tenant-test --wait
kubectl --kubeconfig "tenantkubeconfig-${test_name}" delete pvc nfs-test-pvc -n tenant-test
kubectl delete deployment --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" -n tenant-test
kubectl delete service --kubeconfig tenantkubeconfig-${test_name} "${test_name}-backend" -n tenant-test
# Wait for all machine deployment replicas to be ready (timeout after 10 minutes)
kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
for component in cilium coredns csi vsnap-crd; do
for component in cilium coredns csi ingress-nginx vsnap-crd; do
kubectl wait hr kubernetes-${test_name}-${component} -n tenant-test --timeout=1m --for=condition=ready
done
kubectl wait hr kubernetes-${test_name}-ingress-nginx -n tenant-test --timeout=5m --for=condition=ready
# Clean up by deleting the Kubernetes resource
kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $test_name

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bats
@test "Create a Virtual Machine" {
name='test'
kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
name: $name
namespace: tenant-test
spec:
external: false
externalMethod: PortList
externalPorts:
- 22
instanceType: "u1.medium"
instanceProfile: ubuntu
systemDisk:
image: ubuntu
storage: 5Gi
storageClass: replicated
gpus: []
resources: {}
sshKeys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInit: |
#cloud-config
users:
- name: test
shell: /bin/bash
sudo: ['ALL=(ALL) NOPASSWD: ALL']
groups: sudo
ssh_authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
cloudInitSeed: ""
EOF
sleep 5
kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}
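With the interface address reported, a connectivity probe would be the natural next step if the test ever needs to verify that the guest actually booted — a sketch to run before the delete step, assuming the runner can reach the pod network and using the cloud-init user defined above:
ip=$(kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}')
timeout 60 ssh -o StrictHostKeyChecking=no -o BatchMode=yes "test@${ip}" true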

View File

@@ -1,49 +1,35 @@
#!/usr/bin/env bats
@test "Required installer chart exists" {
if [ ! -f packages/core/installer/Chart.yaml ]; then
echo "Missing: packages/core/installer/Chart.yaml" >&2
@test "Required installer assets exist" {
if [ ! -f _out/assets/cozystack-installer.yaml ]; then
echo "Missing: _out/assets/cozystack-installer.yaml" >&2
exit 1
fi
}
@test "Install Cozystack" {
# Install cozy-installer chart (CRDs from crds/ are applied automatically)
helm upgrade installer packages/core/installer \
--install \
--namespace cozy-system \
--create-namespace \
--wait \
--timeout 2m
# Create namespace & configmap required by installer
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
kubectl create configmap cozystack -n cozy-system \
--from-literal=bundle-name=paas-full \
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
--from-literal=ipv4-pod-gateway=10.244.0.1 \
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
--from-literal=root-host=example.org \
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
--dry-run=client -o yaml | kubectl apply -f -
# Verify the operator deployment is available
kubectl wait deployment/cozystack-operator -n cozy-system --timeout=1m --for=condition=Available
# Apply installer manifests from file
kubectl apply -f _out/assets/cozystack-installer.yaml
# Create platform Package with isp-full variant
kubectl apply -f - <<EOF
apiVersion: cozystack.io/v1alpha1
kind: Package
metadata:
name: cozystack.cozystack-platform
spec:
variant: isp-full
components:
platform:
values:
networking:
podCIDR: "10.244.0.0/16"
podGateway: "10.244.0.1"
serviceCIDR: "10.96.0.0/16"
joinCIDR: "100.64.0.0/16"
publishing:
host: "example.org"
apiServerEndpoint: "https://192.168.123.10:6443"
EOF
# Wait for the installer deployment to become available
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
# Wait until HelmReleases appear & reconcile them
timeout 180 sh -ec 'until [ $(kubectl get hr -A --no-headers 2>/dev/null | wc -l) -gt 10 ]; do sleep 1; done'
timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
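The awk pipeline fans the readiness checks out in parallel: each listing row becomes a backgrounded kubectl wait, and the script closes with a shell-level wait. For two releases, the generated script would look roughly like this (namespaces and names illustrative):
kubectl wait --timeout=15m --for=condition=ready -n cozy-system hr/cozystack-controller &
kubectl wait --timeout=15m --for=condition=ready -n cozy-fluxcd hr/fluxcd &
wait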
# Fail the test if any HelmRelease is not Ready
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
@@ -156,7 +142,7 @@ EOF
# Expose Cozystack services through ingress
kubectl patch package cozystack.cozystack-platform --type merge -p '{"spec":{"components":{"platform":{"values":{"publishing":{"exposedServices":["api","dashboard","cdi-uploadproxy","vm-exportproxy","keycloak"]}}}}}}'
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
# NGINX ingress controller
timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
@@ -183,7 +169,7 @@ EOF
}
@test "Keycloak OIDC stack is healthy" {
kubectl patch package cozystack.cozystack-platform --type merge -p '{"spec":{"components":{"platform":{"values":{"authentication":{"oidc":{"enabled":true}}}}}}}'
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready

View File

@@ -136,28 +136,25 @@ machine:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
#docker.io:
# endpoints:
# - https://dockerio.nexus.aenix.org
#cr.fluentbit.io:
# endpoints:
# - https://fluentbit.nexus.aenix.org
#docker-registry3.mariadb.com:
# endpoints:
# - https://mariadb.nexus.aenix.org
#gcr.io:
# endpoints:
# - https://gcr.nexus.aenix.org
#ghcr.io:
# endpoints:
# - https://ghcr.nexus.aenix.org
#quay.io:
# endpoints:
# - https://quay.nexus.aenix.org
#registry.k8s.io:
# endpoints:
# - https://k8s.nexus.aenix.org
- https://dockerio.nexus.aenix.org
cr.fluentbit.io:
endpoints:
- https://fluentbit.nexus.aenix.org
docker-registry3.mariadb.com:
endpoints:
- https://mariadb.nexus.aenix.org
gcr.io:
endpoints:
- https://gcr.nexus.aenix.org
ghcr.io:
endpoints:
- https://ghcr.nexus.aenix.org
quay.io:
endpoints:
- https://quay.nexus.aenix.org
registry.k8s.io:
endpoints:
- https://k8s.nexus.aenix.org
files:
- content: |
[plugins]
@@ -239,10 +236,7 @@ EOF
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait until etcd is healthy
if ! timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'; then
talosctl dmesg -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 || true
exit 1
fi
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
# Retrieve kubeconfig

View File

@@ -1,179 +0,0 @@
#!/bin/bash
# Migration script from Cozystack ConfigMaps to Package-based configuration
# This script converts cozystack, cozystack-branding, and cozystack-scheduling
# ConfigMaps into a Package resource with the new values structure.
set -e
NAMESPACE="cozy-system"
echo "============================="
echo " Cozystack Migration to v1.0 "
echo "============================="
echo ""
echo "This script will convert existing ConfigMaps to a Package resource."
echo ""
# Check if kubectl is available
if ! command -v kubectl &> /dev/null; then
echo "Error: kubectl is not installed or not in PATH"
exit 1
fi
# Check if jq is available
if ! command -v jq &> /dev/null; then
echo "Error: jq is not installed or not in PATH"
exit 1
fi
# Check if we can access the cluster
if ! kubectl get namespace "$NAMESPACE" &> /dev/null; then
echo "Error: Cannot access namespace $NAMESPACE"
exit 1
fi
# Read ConfigMap cozystack
echo "Reading ConfigMap cozystack..."
COZYSTACK_CM=$(kubectl get configmap -n "$NAMESPACE" cozystack -o json 2>/dev/null || echo "{}")
# Read ConfigMap cozystack-branding
echo "Reading ConfigMap cozystack-branding..."
BRANDING_CM=$(kubectl get configmap -n "$NAMESPACE" cozystack-branding -o json 2>/dev/null || echo "{}")
# Read ConfigMap cozystack-scheduling
echo "Reading ConfigMap cozystack-scheduling..."
SCHEDULING_CM=$(kubectl get configmap -n "$NAMESPACE" cozystack-scheduling -o json 2>/dev/null || echo "{}")
# Extract values from cozystack ConfigMap
CLUSTER_DOMAIN=$(echo "$COZYSTACK_CM" | jq -r '.data["cluster-domain"] // "cozy.local"')
ROOT_HOST=$(echo "$COZYSTACK_CM" | jq -r '.data["root-host"] // "example.org"')
API_SERVER_ENDPOINT=$(echo "$COZYSTACK_CM" | jq -r '.data["api-server-endpoint"] // ""')
OIDC_ENABLED=$(echo "$COZYSTACK_CM" | jq -r '.data["oidc-enabled"] // "false"')
KEYCLOAK_REDIRECTS=$(echo "$COZYSTACK_CM" | jq -r '.data["extra-keycloak-redirect-uri-for-dashboard"] // ""' )
TELEMETRY_ENABLED=$(echo "$COZYSTACK_CM" | jq -r '.data["telemetry-enabled"] // "true"')
BUNDLE_NAME=$(echo "$COZYSTACK_CM" | jq -r '.data["bundle-name"] // "paas-full"')
# Network configuration
POD_CIDR=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-pod-cidr"] // "10.244.0.0/16"')
POD_GATEWAY=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-pod-gateway"] // "10.244.0.1"')
SVC_CIDR=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-svc-cidr"] // "10.96.0.0/16"')
JOIN_CIDR=$(echo "$COZYSTACK_CM" | jq -r '.data["ipv4-join-cidr"] // "100.64.0.0/16"')
EXTERNAL_IPS=$(echo "$COZYSTACK_CM" | jq -r '.data["expose-external-ips"] // ""')
if [ -z "$EXTERNAL_IPS" ]; then
EXTERNAL_IPS="[]"
else
EXTERNAL_IPS=$(echo "$EXTERNAL_IPS" | sed 's/,/\n/g' | awk 'BEGIN{print}{print " - "$0}')
fi
# Determine bundle type
case "$BUNDLE_NAME" in
paas-full|distro-full)
SYSTEM_ENABLED="true"
SYSTEM_TYPE="full"
;;
paas-hosted|distro-hosted)
SYSTEM_ENABLED="false"
SYSTEM_TYPE="hosted"
;;
*)
SYSTEM_ENABLED="false"
SYSTEM_TYPE="hosted"
;;
esac
# Update bundle naming
BUNDLE_NAME=$(echo "$BUNDLE_NAME" | sed 's/paas/isp/')
# Extract branding if available
BRANDING=$(echo "$BRANDING_CM" | jq -r '.data // {} | to_entries[] | "\(.key): \"\(.value)\""')
if [ -z "$BRANDING" ]; then
BRANDING="{}"
else
BRANDING=$(echo "$BRANDING" | awk 'BEGIN{print}{print " " $0}')
fi
# Extract scheduling if available
SCHEDULING_CONSTRAINTS=$(echo "$SCHEDULING_CM" | jq -r '.data["globalAppTopologySpreadConstraints"] // ""')
if [ -z "$SCHEDULING_CONSTRAINTS" ]; then
SCHEDULING_CONSTRAINTS='""'
else
SCHEDULING_CONSTRAINTS=$(echo "$SCHEDULING_CONSTRAINTS" | awk 'BEGIN{print}{print " " $0}')
fi
echo ""
echo "Extracted configuration:"
echo " Cluster Domain: $CLUSTER_DOMAIN"
echo " Root Host: $ROOT_HOST"
echo " API Server Endpoint: $API_SERVER_ENDPOINT"
echo " OIDC Enabled: $OIDC_ENABLED"
echo " Bundle Name: $BUNDLE_NAME"
echo " System Enabled: $SYSTEM_ENABLED"
echo " System Type: $SYSTEM_TYPE"
echo ""
# Generate Package YAML
PACKAGE_YAML=$(cat <<EOF
apiVersion: cozystack.io/v1alpha1
kind: Package
metadata:
name: cozystack.cozystack-platform
namespace: $NAMESPACE
spec:
variant: $BUNDLE_NAME
components:
platform:
values:
bundles:
system:
enabled: $SYSTEM_ENABLED
type: "$SYSTEM_TYPE"
iaas:
enabled: true
paas:
enabled: true
naas:
enabled: true
networking:
clusterDomain: "$CLUSTER_DOMAIN"
podCIDR: "$POD_CIDR"
podGateway: "$POD_GATEWAY"
serviceCIDR: "$SVC_CIDR"
joinCIDR: "$JOIN_CIDR"
publishing:
host: "$ROOT_HOST"
apiServerEndpoint: "$API_SERVER_ENDPOINT"
externalIPs: $EXTERNAL_IPS
authentication:
oidc:
enabled: $OIDC_ENABLED
keycloakExtraRedirectUri: "$KEYCLOAK_REDIRECTS"
scheduling:
globalAppTopologySpreadConstraints: $SCHEDULING_CONSTRAINTS
branding: $BRANDING
EOF
)
echo "Generated Package resource:"
echo "---"
echo "$PACKAGE_YAML"
echo "..."
echo ""
read -p "Do you want to apply this Package? (y/N) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo "Applying Package..."
echo "$PACKAGE_YAML" | kubectl apply -f -
echo ""
echo "Package applied successfully!"
echo ""
echo "You can now safely delete the old ConfigMaps after verifying the migration:"
echo " kubectl delete configmap -n $NAMESPACE cozystack cozystack-branding cozystack-scheduling"
else
echo "Package not applied. You can save the output above and apply it manually."
fi
echo ""
echo "All done!"

View File

@@ -19,15 +19,14 @@ set -o nounset
set -o pipefail
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=${CODEGEN_PKG:-~/go/pkg/mod/k8s.io/code-generator@v0.34.1}
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${SCRIPT_ROOT}/api/api-rules"}"
UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS:-true}"
CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"
TMPDIR=$(mktemp -d)
OPERATOR_CRDDIR=packages/core/installer/crds
OPERATOR_EMBEDDIR=internal/crdinstall/manifests
OPERATOR_CRDDIR=packages/core/installer/definitions
COZY_CONTROLLER_CRDDIR=packages/system/cozystack-controller/definitions
COZY_RD_CRDDIR=packages/system/application-definition-crd/definition
COZY_RD_CRDDIR=packages/system/cozystack-resource-definition-crd/definition
BACKUPS_CORE_CRDDIR=packages/system/backup-controller/definitions
BACKUPSTRATEGY_CRDDIR=packages/system/backupstrategy-controller/definitions
@@ -35,7 +34,7 @@ trap 'rm -rf ${TMPDIR}' EXIT
source "${CODEGEN_PKG}/kube_codegen.sh"
THIS_PKG="github.com/cozystack/cozystack"
THIS_PKG="k8s.io/sample-apiserver"
kube::codegen::gen_helpers \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
@@ -61,24 +60,14 @@ kube::codegen::gen_openapi \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
"${SCRIPT_ROOT}/pkg/apis"
kube::codegen::gen_client \
--with-applyconfig \
--output-dir "${SCRIPT_ROOT}/pkg/generated" \
--output-pkg "${THIS_PKG}/pkg/generated" \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
"${SCRIPT_ROOT}/pkg/apis"
$CONTROLLER_GEN object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
$CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:artifacts:config=${TMPDIR}
mv ${TMPDIR}/cozystack.io_packages.yaml ${OPERATOR_CRDDIR}/cozystack.io_packages.yaml
mv ${TMPDIR}/cozystack.io_packagesources.yaml ${OPERATOR_CRDDIR}/cozystack.io_packagesources.yaml
cp ${OPERATOR_CRDDIR}/cozystack.io_packages.yaml ${OPERATOR_EMBEDDIR}/cozystack.io_packages.yaml
cp ${OPERATOR_CRDDIR}/cozystack.io_packagesources.yaml ${OPERATOR_EMBEDDIR}/cozystack.io_packagesources.yaml
mv ${TMPDIR}/cozystack.io_applicationdefinitions.yaml \
${COZY_RD_CRDDIR}/cozystack.io_applicationdefinitions.yaml
mv ${TMPDIR}/cozystack.io_cozystackresourcedefinitions.yaml \
${COZY_RD_CRDDIR}/cozystack.io_cozystackresourcedefinitions.yaml
mv ${TMPDIR}/backups.cozystack.io*.yaml ${BACKUPS_CORE_CRDDIR}/
mv ${TMPDIR}/strategy.backups.cozystack.io*.yaml ${BACKUPSTRATEGY_CRDDIR}/

View File

@@ -65,21 +65,12 @@ case "$PWD" in
*"/extra/"*) SOURCE_NAME="cozystack-extra" ;;
esac
# Determine variant from PackageSource file
# Look for packages/core/platform/sources/${NAME}-application.yaml
PACKAGE_SOURCE_FILE="../../core/platform/sources/${NAME}-application.yaml"
if [[ -f "$PACKAGE_SOURCE_FILE" ]]; then
VARIANT="$(yq -r '.spec.variants[0].name // "default"' "$PACKAGE_SOURCE_FILE")"
else
VARIANT="default"
fi
# If file doesn't exist, create a minimal skeleton
OUT="${OUT:-$CRD_DIR/$NAME.yaml}"
if [[ ! -f "$OUT" ]]; then
cat >"$OUT" <<EOF
apiVersion: cozystack.io/v1alpha1
kind: ApplicationDefinition
kind: CozystackResourceDefinition
metadata:
name: ${NAME}
spec: {}
@@ -95,7 +86,6 @@ fi
export DESCRIPTION="$DESC"
export ICON_B64="$ICON_B64"
export SOURCE_NAME="$SOURCE_NAME"
export VARIANT="$VARIANT"
export SCHEMA_JSON_MIN="$(jq -c . "$SCHEMA_JSON")"
# Generate keysOrder from values.yaml
@@ -127,20 +117,21 @@ export KEYS_ORDER="$(
# Update only necessary fields in-place
# - openAPISchema is loaded from file as a multi-line string (block scalar)
# - prefix = "<name>-" or "" for extra
# - chartRef points to ExternalArtifact created by Package controller
# - labels ensure cozystack.io/ui: "true"
# - prefix = "<name>-"
# - sourceRef derived from directory (apps|extra)
yq -i '
.apiVersion = (.apiVersion // "cozystack.io/v1alpha1") |
.kind = (.kind // "ApplicationDefinition") |
.kind = (.kind // "CozystackResourceDefinition") |
.metadata.name = strenv(RES_NAME) |
.spec.application.openAPISchema = strenv(SCHEMA_JSON_MIN) |
(.spec.application.openAPISchema style="literal") |
.spec.release.prefix = (strenv(PREFIX)) |
del(.spec.release.labels."cozystack.io/application") |
del(.spec.release.labels."cozystack.io/ui") |
.spec.release.chartRef.kind = "ExternalArtifact" |
.spec.release.chartRef.name = ("cozystack-" + strenv(RES_NAME) + "-application-" + strenv(VARIANT) + "-" + strenv(RES_NAME)) |
.spec.release.chartRef.namespace = "cozy-system" |
.spec.release.labels."cozystack.io/ui" = "true" |
.spec.release.chart.name = strenv(RES_NAME) |
.spec.release.chart.sourceRef.kind = "HelmRepository" |
.spec.release.chart.sourceRef.name = strenv(SOURCE_NAME) |
.spec.release.chart.sourceRef.namespace = "cozy-public" |
.spec.dashboard.description = strenv(DESCRIPTION) |
.spec.dashboard.icon = strenv(ICON_B64) |
.spec.dashboard.keysOrder = env(KEYS_ORDER)
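For a hypothetical application named nats under packages/apps, the file this script maintains would come out roughly as follows (schema and dashboard payloads elided; cozystack-apps as the HelmRepository name is inferred from the cozystack-extra case above, not confirmed by this diff):
apiVersion: cozystack.io/v1alpha1
kind: CozystackResourceDefinition
metadata:
  name: nats
spec:
  application:
    openAPISchema: |
      {"properties": {...}}
  release:
    prefix: nats-
    labels:
      cozystack.io/ui: "true"
    chart:
      name: nats
      sourceRef:
        kind: HelmRepository
        name: cozystack-apps
        namespace: cozy-public
  dashboard:
    description: ...
    icon: ...
    keysOrder: [...]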

View File

@@ -3,14 +3,9 @@ set -xe
version=${VERSION:-$(git describe --tags)}
gh release upload --clobber $version _out/assets/cozystack-crds.yaml
gh release upload --clobber $version _out/assets/cozystack-operator-talos.yaml
gh release upload --clobber $version _out/assets/cozystack-operator-generic.yaml
gh release upload --clobber $version _out/assets/cozystack-operator-hosted.yaml
gh release upload --clobber $version _out/assets/cozystack-installer.yaml
gh release upload --clobber $version _out/assets/metal-amd64.iso
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
gh release upload --clobber $version _out/assets/kernel-amd64
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
gh release upload --clobber $version _out/assets/cozypkg-*.tar.gz
gh release upload --clobber $version _out/assets/cozypkg-checksums.txt

View File

@@ -1,75 +0,0 @@
package backupcontroller
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
const (
// DefaultApplicationAPIGroup is the default API group for applications
// when not specified in ApplicationRef or ApplicationSelector.
// Deprecated: Use backupsv1alpha1.DefaultApplicationAPIGroup instead.
DefaultApplicationAPIGroup = backupsv1alpha1.DefaultApplicationAPIGroup
)
// NormalizeApplicationRef sets the default apiGroup to DefaultApplicationAPIGroup if it's not specified.
// Deprecated: Use backupsv1alpha1.NormalizeApplicationRef instead.
func NormalizeApplicationRef(ref corev1.TypedLocalObjectReference) corev1.TypedLocalObjectReference {
return backupsv1alpha1.NormalizeApplicationRef(ref)
}
// ResolvedBackupConfig contains the resolved strategy and storage configuration
// from a BackupClass.
type ResolvedBackupConfig struct {
StrategyRef corev1.TypedLocalObjectReference
Parameters map[string]string
}
// ResolveBackupClass resolves a BackupClass and finds the matching strategy for the given application.
// It normalizes the applicationRef's apiGroup (defaults to apps.cozystack.io if not specified)
// and matches it against the strategies in the BackupClass.
func ResolveBackupClass(
ctx context.Context,
c client.Client,
backupClassName string,
applicationRef corev1.TypedLocalObjectReference,
) (*ResolvedBackupConfig, error) {
// Normalize applicationRef (default apiGroup if not specified)
applicationRef = NormalizeApplicationRef(applicationRef)
// Get BackupClass
backupClass := &backupsv1alpha1.BackupClass{}
if err := c.Get(ctx, client.ObjectKey{Name: backupClassName}, backupClass); err != nil {
return nil, fmt.Errorf("failed to get BackupClass %s: %w", backupClassName, err)
}
// Determine application API group (already normalized, but extract for matching)
appAPIGroup := backupsv1alpha1.DefaultApplicationAPIGroup
if applicationRef.APIGroup != nil {
appAPIGroup = *applicationRef.APIGroup
}
// Find matching strategy
for _, strategy := range backupClass.Spec.Strategies {
// Normalize strategy's application selector (default apiGroup if not specified)
strategyAPIGroup := backupsv1alpha1.DefaultApplicationAPIGroup
if strategy.Application.APIGroup != nil && *strategy.Application.APIGroup != "" {
strategyAPIGroup = *strategy.Application.APIGroup
}
if strategyAPIGroup == appAPIGroup && strategy.Application.Kind == applicationRef.Kind {
return &ResolvedBackupConfig{
StrategyRef: strategy.StrategyRef,
Parameters: strategy.Parameters,
}, nil
}
}
return nil, fmt.Errorf("no matching strategy found in BackupClass %s for application %s/%s",
backupClassName, appAPIGroup, applicationRef.Kind)
}
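The BackupClass shape this resolver consumed is easiest to see on a concrete object — a sketch consistent with the unit tests below; the apiVersion and the lowerCamel field names are assumptions based on the api/backups/v1alpha1 package layout:
apiVersion: backups.cozystack.io/v1alpha1
kind: BackupClass
metadata:
  name: velero
spec:
  strategies:
  - application:
      kind: VirtualMachine          # apiGroup defaults to apps.cozystack.io
    strategyRef:
      apiGroup: strategy.backups.cozystack.io
      kind: Velero
      name: velero-strategy-vm
    parameters:
      backupStorageLocationName: default
With this object, resolving an applicationRef of kind VirtualMachine returns velero-strategy-vm plus its parameters; a PostgreSQL ref would fail with "no matching strategy found".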

View File

@@ -1,375 +0,0 @@
package backupcontroller
import (
"context"
"testing"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
func TestNormalizeApplicationRef(t *testing.T) {
tests := []struct {
name string
input corev1.TypedLocalObjectReference
expected corev1.TypedLocalObjectReference
}{
{
name: "apiGroup not specified - should default to apps.cozystack.io",
input: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
{
name: "apiGroup is nil - should default to apps.cozystack.io",
input: corev1.TypedLocalObjectReference{
APIGroup: nil,
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
{
name: "apiGroup is empty string - should default to apps.cozystack.io",
input: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(""),
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
{
name: "apiGroup is explicitly set - should keep it",
input: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "CustomApp",
Name: "custom-app",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "CustomApp",
Name: "custom-app",
},
},
{
name: "apiGroup is apps.cozystack.io - should keep it",
input: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
expected: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(DefaultApplicationAPIGroup),
Kind: "VirtualMachine",
Name: "vm1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := NormalizeApplicationRef(tt.input)
if !apiequality.Semantic.DeepEqual(result, tt.expected) {
t.Errorf("NormalizeApplicationRef() = %v, want %v", result, tt.expected)
}
})
}
}
func TestResolveBackupClass(t *testing.T) {
scheme := runtime.NewScheme()
err := backupsv1alpha1.AddToScheme(scheme)
if err != nil {
t.Fatalf("Failed to add backupsv1alpha1 to scheme: %v", err)
}
err = strategyv1alpha1.AddToScheme(scheme)
if err != nil {
t.Fatalf("Failed to add strategyv1alpha1 to scheme: %v", err)
}
tests := []struct {
name string
backupClass *backupsv1alpha1.BackupClass
applicationRef corev1.TypedLocalObjectReference
backupClassName string
wantErr bool
expectedStrategyRef *corev1.TypedLocalObjectReference
expectedParams map[string]string
}{
{
name: "successful resolution - matches VirtualMachine strategy",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "VirtualMachine",
},
Parameters: map[string]string{
"backupStorageLocationName": "default",
},
},
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-mariadb",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "MariaDB",
},
Parameters: map[string]string{
"backupStorageLocationName": "mariadb-storage",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "velero",
wantErr: false,
expectedStrategyRef: &corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
expectedParams: map[string]string{
"backupStorageLocationName": "default",
},
},
{
name: "successful resolution - matches MariaDB strategy with explicit apiGroup",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-mariadb",
},
Application: backupsv1alpha1.ApplicationSelector{
APIGroup: stringPtr("apps.cozystack.io"),
Kind: "MariaDB",
},
Parameters: map[string]string{
"backupStorageLocationName": "mariadb-storage",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("apps.cozystack.io"),
Kind: "MariaDB",
Name: "mariadb1",
},
backupClassName: "velero",
wantErr: false,
expectedStrategyRef: &corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-mariadb",
},
expectedParams: map[string]string{
"backupStorageLocationName": "mariadb-storage",
},
},
{
name: "successful resolution - applicationRef without apiGroup defaults correctly",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "VirtualMachine",
},
Parameters: map[string]string{
"backupStorageLocationName": "default",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
// No APIGroup specified
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "velero",
wantErr: false,
expectedStrategyRef: &corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
expectedParams: map[string]string{
"backupStorageLocationName": "default",
},
},
{
name: "error - BackupClass not found",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{},
},
},
applicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "nonexistent",
wantErr: true,
},
{
name: "error - no matching strategy found",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
Kind: "VirtualMachine",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
Kind: "PostgreSQL", // Not in BackupClass
Name: "pg1",
},
backupClassName: "velero",
wantErr: true,
},
{
name: "error - apiGroup mismatch",
backupClass: &backupsv1alpha1.BackupClass{
ObjectMeta: metav1.ObjectMeta{
Name: "velero",
},
Spec: backupsv1alpha1.BackupClassSpec{
Strategies: []backupsv1alpha1.BackupClassStrategy{
{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy-vm",
},
Application: backupsv1alpha1.ApplicationSelector{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "VirtualMachine",
},
},
},
},
},
applicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("apps.cozystack.io"), // Different apiGroup
Kind: "VirtualMachine",
Name: "vm1",
},
backupClassName: "velero",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(tt.backupClass).
Build()
resolved, err := ResolveBackupClass(ctx, fakeClient, tt.backupClassName, tt.applicationRef)
if tt.wantErr {
if err == nil {
t.Errorf("ResolveBackupClass() expected error but got none")
}
return
}
if err != nil {
t.Errorf("ResolveBackupClass() error = %v, wantErr %v", err, tt.wantErr)
return
}
if resolved == nil {
t.Errorf("ResolveBackupClass() returned nil, expected ResolvedBackupConfig")
return
}
// Verify strategy ref using apimachinery equality
if tt.expectedStrategyRef != nil {
if !apiequality.Semantic.DeepEqual(resolved.StrategyRef, *tt.expectedStrategyRef) {
t.Errorf("ResolveBackupClass() StrategyRef = %v, want %v", resolved.StrategyRef, *tt.expectedStrategyRef)
}
}
// Verify parameters using apimachinery equality
if tt.expectedParams != nil {
if !apiequality.Semantic.DeepEqual(resolved.Parameters, tt.expectedParams) {
t.Errorf("ResolveBackupClass() Parameters = %v, want %v", resolved.Parameters, tt.expectedParams)
}
}
})
}
}
func stringPtr(s string) *string {
return &s
}

View File

@@ -6,7 +6,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
@@ -21,8 +20,8 @@ import (
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
// BackupJobReconciler reconciles BackupJob with a strategy from the
// strategy.backups.cozystack.io API group.
// BackupVeleroStrategyReconciler reconciles BackupJob with a strategy referencing
// Velero.strategy.backups.cozystack.io objects.
type BackupJobReconciler struct {
client.Client
dynamic.Interface
@@ -46,42 +45,29 @@ func (r *BackupJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, err
}
// Normalize ApplicationRef (default apiGroup if not specified)
normalizedAppRef := NormalizeApplicationRef(j.Spec.ApplicationRef)
// Resolve BackupClass
resolved, err := ResolveBackupClass(ctx, r.Client, j.Spec.BackupClassName, normalizedAppRef)
if err != nil {
logger.Error(err, "failed to resolve BackupClass", "backupClassName", j.Spec.BackupClassName)
return ctrl.Result{}, err
}
strategyRef := resolved.StrategyRef
// Validate strategyRef
if strategyRef.APIGroup == nil {
logger.V(1).Info("BackupJob resolved StrategyRef has nil APIGroup, skipping", "backupjob", j.Name)
if j.Spec.StrategyRef.APIGroup == nil {
logger.V(1).Info("BackupJob has nil StrategyRef.APIGroup, skipping", "backupjob", j.Name)
return ctrl.Result{}, nil
}
if *strategyRef.APIGroup != strategyv1alpha1.GroupVersion.Group {
logger.V(1).Info("BackupJob resolved StrategyRef.APIGroup doesn't match, skipping",
if *j.Spec.StrategyRef.APIGroup != strategyv1alpha1.GroupVersion.Group {
logger.V(1).Info("BackupJob StrategyRef.APIGroup doesn't match, skipping",
"backupjob", j.Name,
"expected", strategyv1alpha1.GroupVersion.Group,
"got", *strategyRef.APIGroup)
"got", *j.Spec.StrategyRef.APIGroup)
return ctrl.Result{}, nil
}
logger.Info("processing BackupJob", "backupjob", j.Name, "strategyKind", strategyRef.Kind, "backupClassName", j.Spec.BackupClassName)
switch strategyRef.Kind {
logger.Info("processing BackupJob", "backupjob", j.Name, "strategyKind", j.Spec.StrategyRef.Kind)
switch j.Spec.StrategyRef.Kind {
case strategyv1alpha1.JobStrategyKind:
return r.reconcileJob(ctx, j, resolved)
return r.reconcileJob(ctx, j)
case strategyv1alpha1.VeleroStrategyKind:
return r.reconcileVelero(ctx, j, resolved)
return r.reconcileVelero(ctx, j)
default:
logger.V(1).Info("BackupJob resolved StrategyRef.Kind not supported, skipping",
logger.V(1).Info("BackupJob StrategyRef.Kind not supported, skipping",
"backupjob", j.Name,
"kind", strategyRef.Kind,
"kind", j.Spec.StrategyRef.Kind,
"supported", []string{strategyv1alpha1.JobStrategyKind, strategyv1alpha1.VeleroStrategyKind})
return ctrl.Result{}, nil
}
@@ -89,17 +75,6 @@ func (r *BackupJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *BackupJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
// index BackupJob by backupClassName for efficient lookups when BackupClass changes
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &backupsv1alpha1.BackupJob{}, "spec.backupClassName", func(obj client.Object) []string {
job := obj.(*backupsv1alpha1.BackupJob)
if job.Spec.BackupClassName == "" {
return []string{}
}
return []string{job.Spec.BackupClassName}
}); err != nil {
return err
}
cfg := mgr.GetConfig()
var err error
if r.Interface, err = dynamic.NewForConfig(cfg); err != nil {
@@ -116,27 +91,3 @@ func (r *BackupJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&backupsv1alpha1.BackupJob{}).
Complete(r)
}
func (r *BackupJobReconciler) markBackupJobFailed(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, message string) (ctrl.Result, error) {
logger := getLogger(ctx)
now := metav1.Now()
backupJob.Status.CompletedAt = &now
backupJob.Status.Phase = backupsv1alpha1.BackupJobPhaseFailed
backupJob.Status.Message = message
// Add condition
backupJob.Status.Conditions = append(backupJob.Status.Conditions, metav1.Condition{
Type: "Ready",
Status: metav1.ConditionFalse,
Reason: "BackupFailed",
Message: message,
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, backupJob); err != nil {
logger.Error(err, "failed to update BackupJob status to Failed")
return ctrl.Result{}, err
}
logger.Debug("BackupJob failed", "message", message)
return ctrl.Result{}, nil
}

View File

@@ -10,9 +10,6 @@ import (
)
func BackupJob(p *backupsv1alpha1.Plan, scheduledFor time.Time) *backupsv1alpha1.BackupJob {
// Normalize ApplicationRef (default apiGroup if not specified)
appRef := backupsv1alpha1.NormalizeApplicationRef(*p.Spec.ApplicationRef.DeepCopy())
job := &backupsv1alpha1.BackupJob{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", p.Name, scheduledFor.Unix()/60),
@@ -22,8 +19,9 @@ func BackupJob(p *backupsv1alpha1.Plan, scheduledFor time.Time) *backupsv1alpha1
PlanRef: &corev1.LocalObjectReference{
Name: p.Name,
},
ApplicationRef: appRef,
BackupClassName: p.Spec.BackupClassName,
ApplicationRef: *p.Spec.ApplicationRef.DeepCopy(),
StorageRef: *p.Spec.StorageRef.DeepCopy(),
StrategyRef: *p.Spec.StrategyRef.DeepCopy(),
},
}
return job
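The generated name appends the scheduled minute since the Unix epoch to the plan name: for scheduledFor = 2024-01-01T02:00:00Z, Unix() is 1704074400, so the job comes out as test-plan-28401240. That minute granularity is what lets the Plan reconciler's IsAlreadyExists check absorb duplicate attempts within one scheduling window. A job built by this factory would look roughly like this (apiVersion and the storageRef values are illustrative):
apiVersion: backups.cozystack.io/v1alpha1
kind: BackupJob
metadata:
  name: test-plan-28401240
  namespace: default
spec:
  planRef:
    name: test-plan
  applicationRef:
    kind: VirtualMachine
    name: vm1
  storageRef:
    apiGroup: apps.cozystack.io
    kind: Bucket
    name: backup-bucket
  strategyRef:
    apiGroup: strategy.backups.cozystack.io
    kind: Velero
    name: velero-strategy-vm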

View File

@@ -1,167 +0,0 @@
package factory
import (
"testing"
"time"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestBackupJob(t *testing.T) {
tests := []struct {
name string
plan *backupsv1alpha1.Plan
scheduled time.Time
validate func(*testing.T, *backupsv1alpha1.BackupJob)
}{
{
name: "creates BackupJob with BackupClassName",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Name == "" {
t.Error("BackupJob name should be set")
}
if job.Namespace != "default" {
t.Errorf("BackupJob namespace = %v, want default", job.Namespace)
}
if job.Spec.BackupClassName != "velero" {
t.Errorf("BackupJob BackupClassName = %v, want velero", job.Spec.BackupClassName)
}
if job.Spec.ApplicationRef.Kind != "VirtualMachine" {
t.Errorf("BackupJob ApplicationRef.Kind = %v, want VirtualMachine", job.Spec.ApplicationRef.Kind)
}
if job.Spec.ApplicationRef.Name != "vm1" {
t.Errorf("BackupJob ApplicationRef.Name = %v, want vm1", job.Spec.ApplicationRef.Name)
}
if job.Spec.PlanRef == nil || job.Spec.PlanRef.Name != "test-plan" {
t.Errorf("BackupJob PlanRef = %v, want {Name: test-plan}", job.Spec.PlanRef)
}
},
},
{
name: "normalizes ApplicationRef apiGroup",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
// No APIGroup specified
Kind: "MariaDB",
Name: "mariadb1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Spec.ApplicationRef.APIGroup == nil {
t.Error("BackupJob ApplicationRef.APIGroup should be set (normalized)")
return
}
if *job.Spec.ApplicationRef.APIGroup != "apps.cozystack.io" {
t.Errorf("BackupJob ApplicationRef.APIGroup = %v, want apps.cozystack.io", *job.Spec.ApplicationRef.APIGroup)
}
},
},
{
name: "preserves explicit ApplicationRef apiGroup",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("custom.api.group.io"),
Kind: "CustomApp",
Name: "custom1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Spec.ApplicationRef.APIGroup == nil {
t.Error("BackupJob ApplicationRef.APIGroup should be preserved")
return
}
if *job.Spec.ApplicationRef.APIGroup != "custom.api.group.io" {
t.Errorf("BackupJob ApplicationRef.APIGroup = %v, want custom.api.group.io", *job.Spec.ApplicationRef.APIGroup)
}
},
},
{
name: "generates unique job name based on timestamp",
plan: &backupsv1alpha1.Plan{
ObjectMeta: metav1.ObjectMeta{
Name: "test-plan",
Namespace: "default",
},
Spec: backupsv1alpha1.PlanSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
Kind: "VirtualMachine",
Name: "vm1",
},
BackupClassName: "velero",
Schedule: backupsv1alpha1.PlanSchedule{
Type: backupsv1alpha1.PlanScheduleTypeCron,
Cron: "0 2 * * *",
},
},
},
scheduled: time.Date(2024, 1, 1, 2, 0, 0, 0, time.UTC),
validate: func(t *testing.T, job *backupsv1alpha1.BackupJob) {
if job.Name == "" {
t.Error("BackupJob name should be generated")
}
// Name should start with plan name
if len(job.Name) < len("test-plan") {
t.Errorf("BackupJob name = %v, should start with test-plan", job.Name)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
job := BackupJob(tt.plan, tt.scheduled)
if job == nil {
t.Fatal("BackupJob() returned nil")
}
tt.validate(t, job)
})
}
}
func stringPtr(s string) *string {
return &s
}

View File

@@ -9,13 +9,7 @@ import (
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
func (r *BackupJobReconciler) reconcileJob(ctx context.Context, j *backupsv1alpha1.BackupJob, resolved *ResolvedBackupConfig) (ctrl.Result, error) {
_ = log.FromContext(ctx)
_ = resolved // the resolved BackupClass parameters will be consumed once the Job strategy is implemented
return ctrl.Result{}, nil
}
func (r *RestoreJobReconciler) reconcileJobRestore(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup) (ctrl.Result, error) {
func (r *BackupJobReconciler) reconcileJob(ctx context.Context, j *backupsv1alpha1.BackupJob) (ctrl.Result, error) {
_ = log.FromContext(ctx)
return ctrl.Result{}, nil
}

View File

@@ -20,8 +20,8 @@ import (
)
const (
minRequeueDelay = 30 * time.Second
startingDeadline = 300 * time.Second
minRequeueDelay = 30 * time.Second
startingDeadlineSeconds = 300 * time.Second
)
// PlanReconciler reconciles a Plan object
@@ -45,7 +45,7 @@ func (r *PlanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
return ctrl.Result{}, err
}
tCheck := time.Now().Add(-startingDeadline)
tCheck := time.Now().Add(-startingDeadlineSeconds)
sch, err := cron.ParseStandard(p.Spec.Schedule.Cron)
if err != nil {
errWrapped := fmt.Errorf("could not parse cron %s: %w", p.Spec.Schedule.Cron, err)
@@ -78,7 +78,7 @@ func (r *PlanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
tNext := sch.Next(tCheck)
if time.Now().Before(tNext) {
return ctrl.Result{RequeueAfter: time.Until(tNext)}, nil
return ctrl.Result{RequeueAfter: tNext.Sub(time.Now())}, nil
}
job := factory.BackupJob(p, tNext)
@@ -88,12 +88,12 @@ func (r *PlanReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
if err := r.Create(ctx, job); err != nil {
if apierrors.IsAlreadyExists(err) {
return ctrl.Result{RequeueAfter: startingDeadline}, nil
return ctrl.Result{RequeueAfter: startingDeadlineSeconds}, nil
}
return ctrl.Result{}, err
}
return ctrl.Result{RequeueAfter: startingDeadline}, nil
return ctrl.Result{RequeueAfter: startingDeadlineSeconds}, nil
}
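Worked through on a concrete schedule: with cron "0 2 * * *" and a reconcile at 01:58:00, tCheck is 01:53:00, sch.Next(tCheck) yields 02:00:00, and since now is before tNext the Plan simply requeues for two minutes. On the next reconcile at 02:00:30, tCheck is 01:55:30, tNext is again 02:00:00, now is past it, so a BackupJob for the 02:00 slot is created and the Plan requeues after startingDeadlineSeconds; any duplicate attempt inside that window hits IsAlreadyExists and is absorbed.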
// SetupWithManager registers our controller with the Manager and sets up watches.

View File

@@ -1,140 +0,0 @@
package backupcontroller
import (
"context"
"fmt"
"net/http"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/log"
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
)
// RestoreJobReconciler reconciles RestoreJob objects.
// It routes RestoreJobs to strategy-specific handlers based on the strategy
// referenced in the Backup that the RestoreJob is restoring from.
type RestoreJobReconciler struct {
client.Client
dynamic.Interface
meta.RESTMapper
Scheme *runtime.Scheme
Recorder record.EventRecorder
}
func (r *RestoreJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
logger.Info("reconciling RestoreJob", "namespace", req.Namespace, "name", req.Name)
restoreJob := &backupsv1alpha1.RestoreJob{}
err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, restoreJob)
if err != nil {
if apierrors.IsNotFound(err) {
logger.V(1).Info("RestoreJob not found, skipping")
return ctrl.Result{}, nil
}
logger.Error(err, "failed to get RestoreJob")
return ctrl.Result{}, err
}
// If already completed, no need to reconcile
if restoreJob.Status.Phase == backupsv1alpha1.RestoreJobPhaseSucceeded ||
restoreJob.Status.Phase == backupsv1alpha1.RestoreJobPhaseFailed {
logger.V(1).Info("RestoreJob already completed, skipping", "phase", restoreJob.Status.Phase)
return ctrl.Result{}, nil
}
// Step 1: Fetch the referenced Backup
backup := &backupsv1alpha1.Backup{}
backupKey := types.NamespacedName{Namespace: req.Namespace, Name: restoreJob.Spec.BackupRef.Name}
if err := r.Get(ctx, backupKey, backup); err != nil {
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("failed to get Backup: %v", err))
}
// Step 2: Determine effective strategy from backup.spec.strategyRef
if backup.Spec.StrategyRef.APIGroup == nil {
return r.markRestoreJobFailed(ctx, restoreJob, "Backup has nil StrategyRef.APIGroup")
}
if *backup.Spec.StrategyRef.APIGroup != strategyv1alpha1.GroupVersion.Group {
return r.markRestoreJobFailed(ctx, restoreJob,
fmt.Sprintf("StrategyRef.APIGroup doesn't match: %s", *backup.Spec.StrategyRef.APIGroup))
}
logger.Info("processing RestoreJob", "restorejob", restoreJob.Name, "backup", backup.Name, "strategyKind", backup.Spec.StrategyRef.Kind)
switch backup.Spec.StrategyRef.Kind {
case strategyv1alpha1.JobStrategyKind:
return r.reconcileJobRestore(ctx, restoreJob, backup)
case strategyv1alpha1.VeleroStrategyKind:
return r.reconcileVeleroRestore(ctx, restoreJob, backup)
default:
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("StrategyRef.Kind not supported: %s", backup.Spec.StrategyRef.Kind))
}
}
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *RestoreJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
cfg := mgr.GetConfig()
var err error
if r.Interface, err = dynamic.NewForConfig(cfg); err != nil {
return err
}
var h *http.Client
if h, err = rest.HTTPClientFor(cfg); err != nil {
return err
}
if r.RESTMapper, err = apiutil.NewDynamicRESTMapper(cfg, h); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&backupsv1alpha1.RestoreJob{}).
Complete(r)
}
// getTargetApplicationRef determines the effective target application reference.
// According to DESIGN.md, if spec.targetApplicationRef is omitted, drivers SHOULD
// restore into backup.spec.applicationRef.
// The returned reference is normalized to ensure APIGroup has a default value.
func (r *RestoreJobReconciler) getTargetApplicationRef(restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup) corev1.TypedLocalObjectReference {
if restoreJob.Spec.TargetApplicationRef != nil {
return backupsv1alpha1.NormalizeApplicationRef(*restoreJob.Spec.TargetApplicationRef)
}
return backupsv1alpha1.NormalizeApplicationRef(backup.Spec.ApplicationRef)
}
// markRestoreJobFailed updates the RestoreJob status to Failed with the given message.
func (r *RestoreJobReconciler) markRestoreJobFailed(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, message string) (ctrl.Result, error) {
logger := getLogger(ctx)
now := metav1.Now()
restoreJob.Status.CompletedAt = &now
restoreJob.Status.Phase = backupsv1alpha1.RestoreJobPhaseFailed
restoreJob.Status.Message = message
// Add condition
restoreJob.Status.Conditions = append(restoreJob.Status.Conditions, metav1.Condition{
Type: "Ready",
Status: metav1.ConditionFalse,
Reason: "RestoreFailed",
Message: message,
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, restoreJob); err != nil {
logger.Error(err, "failed to update RestoreJob status to Failed")
return ctrl.Result{}, err
}
logger.Debug("RestoreJob failed", "message", message)
return ctrl.Result{}, nil
}
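For orientation, a RestoreJob that this reconciler routes would look roughly like this — the apiVersion, names, and Backup naming scheme are illustrative; only the spec fields are taken from the code above:
apiVersion: backups.cozystack.io/v1alpha1
kind: RestoreJob
metadata:
  name: restore-vm1
  namespace: tenant-test
spec:
  backupRef:
    name: vm1-2024-01-01-02-00-00   # an existing Backup in the same namespace
  targetApplicationRef:             # optional; omitted means restore into backup.spec.applicationRef
    kind: VirtualMachine
    name: vm1-restored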

View File

@@ -2,13 +2,16 @@ package backupcontroller
import (
"context"
"encoding/json"
"fmt"
"reflect"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -46,22 +49,36 @@ type S3Credentials struct {
AccessSecretKey string
}
// bucketInfo represents the structure of BucketInfo stored in the secret
type bucketInfo struct {
Spec struct {
BucketName string `json:"bucketName"`
SecretS3 struct {
Endpoint string `json:"endpoint"`
Region string `json:"region"`
AccessKeyID string `json:"accessKeyID"`
AccessSecretKey string `json:"accessSecretKey"`
} `json:"secretS3"`
} `json:"spec"`
}
const (
defaultRequeueAfter = 5 * time.Second
defaultActiveJobPollingInterval = defaultRequeueAfter
defaultRestoreRequeueAfter = 5 * time.Second
defaultActiveRestorePollingInterval = defaultRestoreRequeueAfter
defaultRequeueAfter = 5 * time.Second
defaultActiveJobPollingInterval = defaultRequeueAfter
// Velero requires API objects and secrets to be in the cozy-velero namespace
veleroNamespace = "cozy-velero"
veleroBackupNameMetadataKey = "velero.io/backup-name"
veleroBackupNamespaceMetadataKey = "velero.io/backup-namespace"
veleroNamespace = "cozy-velero"
virtualMachinePrefix = "virtual-machine-"
)
func storageS3SecretName(namespace, backupJobName string) string {
return fmt.Sprintf("backup-%s-%s-s3-credentials", namespace, backupJobName)
}
func boolPtr(b bool) *bool {
return &b
}
func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1alpha1.BackupJob, resolved *ResolvedBackupConfig) (ctrl.Result, error) {
func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1alpha1.BackupJob) (ctrl.Result, error) {
logger := getLogger(ctx)
logger.Debug("reconciling Velero strategy", "backupjob", j.Name, "phase", j.Status.Phase)
@@ -88,13 +105,13 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
logger.Debug("BackupJob already started", "startedAt", j.Status.StartedAt, "phase", j.Status.Phase)
}
// Step 2: Resolve inputs - Read Strategy from resolved config
logger.Debug("fetching Velero strategy", "strategyName", resolved.StrategyRef.Name)
// Step 2: Resolve inputs - Read Strategy, Storage, Application, optionally Plan
logger.Debug("fetching Velero strategy", "strategyName", j.Spec.StrategyRef.Name)
veleroStrategy := &strategyv1alpha1.Velero{}
if err := r.Get(ctx, client.ObjectKey{Name: resolved.StrategyRef.Name}, veleroStrategy); err != nil {
if err := r.Get(ctx, client.ObjectKey{Name: j.Spec.StrategyRef.Name}, veleroStrategy); err != nil {
if errors.IsNotFound(err) {
logger.Error(err, "Velero strategy not found", "strategyName", resolved.StrategyRef.Name)
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("Velero strategy not found: %s", resolved.StrategyRef.Name))
logger.Error(err, "Velero strategy not found", "strategyName", j.Spec.StrategyRef.Name)
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("Velero strategy not found: %s", j.Spec.StrategyRef.Name))
}
logger.Error(err, "failed to get Velero strategy")
return ctrl.Result{}, err
@@ -103,6 +120,7 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
// Step 3: Execute backup logic
// Check if we already created a Velero Backup
// Use human-readable timestamp: YYYY-MM-DD-HH-MM-SS
if j.Status.StartedAt == nil {
logger.Error(nil, "StartedAt is nil after status update, this should not happen")
return ctrl.Result{RequeueAfter: defaultRequeueAfter}, nil
@@ -125,7 +143,7 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
if len(veleroBackupList.Items) == 0 {
// Create Velero Backup
logger.Debug("Velero Backup not found, creating new one")
if err := r.createVeleroBackup(ctx, j, veleroStrategy, resolved); err != nil {
if err := r.createVeleroBackup(ctx, j, veleroStrategy); err != nil {
logger.Error(err, "failed to create Velero Backup")
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("failed to create Velero Backup: %v", err))
}
@@ -177,7 +195,7 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
if phase == "Completed" {
// Check if we already created the Backup resource
if j.Status.BackupRef == nil {
backup, err := r.createBackupResource(ctx, j, veleroBackup, resolved)
backup, err := r.createBackupResource(ctx, j, veleroBackup)
if err != nil {
return r.markBackupJobFailed(ctx, j, fmt.Sprintf("failed to create Backup resource: %v", err))
}
@@ -208,7 +226,293 @@ func (r *BackupJobReconciler) reconcileVelero(ctx context.Context, j *backupsv1a
return ctrl.Result{RequeueAfter: defaultRequeueAfter}, nil
}
func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, strategy *strategyv1alpha1.Velero, resolved *ResolvedBackupConfig) error {
// resolveBucketStorageRef discovers S3 credentials from a Bucket storageRef
// It follows this flow:
// 1. Get the Bucket resource (apps.cozystack.io/v1alpha1)
// 2. Find the BucketAccess that references this bucket
// 3. Get the secret from BucketAccess.spec.credentialsSecretName
// 4. Decode BucketInfo from secret.data.BucketInfo and extract S3 credentials
func (r *BackupJobReconciler) resolveBucketStorageRef(ctx context.Context, storageRef corev1.TypedLocalObjectReference, namespace string) (*S3Credentials, error) {
logger := getLogger(ctx)
// Validate the APIGroup before dereferencing it: a nil pointer here
// would otherwise panic, and only apps.cozystack.io Buckets are supported.
if storageRef.APIGroup == nil {
return nil, fmt.Errorf("storage APIGroup is not set, expected apps.cozystack.io")
}
if *storageRef.APIGroup != "apps.cozystack.io" {
return nil, fmt.Errorf("unsupported storage APIGroup %q, expected apps.cozystack.io", *storageRef.APIGroup)
}
// Step 1: Get the Bucket resource
bucket := &unstructured.Unstructured{}
bucket.SetGroupVersionKind(schema.GroupVersionKind{
Group: *storageRef.APIGroup,
Version: "v1alpha1",
Kind: storageRef.Kind,
})
bucketKey := client.ObjectKey{Namespace: namespace, Name: storageRef.Name}
if err := r.Get(ctx, bucketKey, bucket); err != nil {
return nil, fmt.Errorf("failed to get Bucket %s: %w", storageRef.Name, err)
}
// Step 2: Determine the bucket claim name.
// For an apps.cozystack.io Bucket, the BucketClaim name is typically the
// same as the Bucket name: based on the templates it is the Release.Name,
// which equals the Bucket name.
bucketName := storageRef.Name
// Step 3: Get the BucketAccess (assumed to be named "bucket-<bucketName>")
bucketAccess := &unstructured.Unstructured{}
bucketAccess.SetGroupVersionKind(schema.GroupVersionKind{
Group: "objectstorage.k8s.io",
Version: "v1alpha1",
Kind: "BucketAccess",
})
bucketAccessKey := client.ObjectKey{Name: "bucket-" + bucketName, Namespace: namespace}
if err := r.Get(ctx, bucketAccessKey, bucketAccess); err != nil {
return nil, fmt.Errorf("failed to get BucketAccess %s in namespace %s: %w", bucketName, namespace, err)
}
// Step 4: Get the secret name from BucketAccess
secretName, found, err := unstructured.NestedString(bucketAccess.Object, "spec", "credentialsSecretName")
if err != nil {
return nil, fmt.Errorf("failed to get credentialsSecretName from BucketAccess: %w", err)
}
if !found || secretName == "" {
return nil, fmt.Errorf("credentialsSecretName not found in BucketAccess %s", bucketAccessKey.Name)
}
// Step 5: Get the secret
secret := &corev1.Secret{}
secretKey := client.ObjectKey{Namespace: namespace, Name: secretName}
if err := r.Get(ctx, secretKey, secret); err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", secretName, err)
}
// Step 6: Decode BucketInfo from secret.data.BucketInfo
bucketInfoData, found := secret.Data["BucketInfo"]
if !found {
return nil, fmt.Errorf("BucketInfo key not found in secret %s", secretName)
}
// Parse JSON value
var info bucketInfo
if err := json.Unmarshal(bucketInfoData, &info); err != nil {
return nil, fmt.Errorf("failed to unmarshal BucketInfo from secret %s: %w", secretName, err)
}
// Step 7: Extract and return S3 credentials
creds := &S3Credentials{
BucketName: info.Spec.BucketName,
Endpoint: info.Spec.SecretS3.Endpoint,
Region: info.Spec.SecretS3.Region,
AccessKeyID: info.Spec.SecretS3.AccessKeyID,
AccessSecretKey: info.Spec.SecretS3.AccessSecretKey,
}
logger.Debug("resolved S3 credentials from Bucket storageRef",
"bucket", storageRef.Name,
"bucketName", creds.BucketName,
"endpoint", creds.Endpoint)
return creds, nil
}
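The BucketInfo payload decoded in step 6 has exactly the shape of the bucketInfo struct above; with placeholder values it looks like:
{
  "spec": {
    "bucketName": "backups-tenant-test",
    "secretS3": {
      "endpoint": "https://s3.example.org",
      "region": "us-east-1",
      "accessKeyID": "AKIAEXAMPLE",
      "accessSecretKey": "wJalrEXAMPLEKEY"
    }
  }
}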
// createS3CredsForVelero creates or updates a Kubernetes Secret containing
// Velero S3 credentials in the format expected by Velero's cloud-credentials plugin.
func (r *BackupJobReconciler) createS3CredsForVelero(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, creds *S3Credentials) error {
logger := getLogger(ctx)
secretName := storageS3SecretName(backupJob.Namespace, backupJob.Name)
secretNamespace := veleroNamespace
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: secretNamespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
"cloud": fmt.Sprintf(`[default]
aws_access_key_id=%s
aws_secret_access_key=%s
services = seaweed-s3
[services seaweed-s3]
s3 =
endpoint_url = %s
`, creds.AccessKeyID, creds.AccessSecretKey, creds.Endpoint),
},
}
foundSecret := &corev1.Secret{}
secretKey := client.ObjectKey{Name: secretName, Namespace: secretNamespace}
err := r.Get(ctx, secretKey, foundSecret)
if err != nil && errors.IsNotFound(err) {
// Create the Secret
if err := r.Create(ctx, secret); err != nil {
r.Recorder.Event(backupJob, corev1.EventTypeWarning, "SecretCreationFailed",
fmt.Sprintf("Failed to create Velero credentials secret %s/%s: %v", secretNamespace, secretName, err))
return fmt.Errorf("failed to create Velero credentials secret: %w", err)
}
logger.Debug("created Velero credentials secret", "secret", secretName)
r.Recorder.Event(backupJob, corev1.EventTypeNormal, "SecretCreated",
fmt.Sprintf("Created Velero credentials secret %s/%s", secretNamespace, secretName))
} else if err == nil {
// Update if necessary - only update if the secret data has actually changed
// Compare the new secret data with existing secret data
existingData := foundSecret.Data
if existingData == nil {
existingData = make(map[string][]byte)
}
newData := make(map[string][]byte)
for k, v := range secret.StringData {
newData[k] = []byte(v)
}
// Check if data has changed
dataChanged := false
if len(existingData) != len(newData) {
dataChanged = true
} else {
for k, newVal := range newData {
existingVal, exists := existingData[k]
if !exists || !reflect.DeepEqual(existingVal, newVal) {
dataChanged = true
break
}
}
}
if dataChanged {
foundSecret.StringData = secret.StringData
foundSecret.Data = nil // Clear .Data so .StringData will be used
if err := r.Update(ctx, foundSecret); err != nil {
r.Recorder.Event(backupJob, corev1.EventTypeWarning, "SecretUpdateFailed",
fmt.Sprintf("Failed to update Velero credentials secret %s/%s: %v", secretNamespace, secretName, err))
return fmt.Errorf("failed to update Velero credentials secret: %w", err)
}
logger.Debug("updated Velero credentials secret", "secret", secretName)
r.Recorder.Event(backupJob, corev1.EventTypeNormal, "SecretUpdated",
fmt.Sprintf("Updated Velero credentials secret %s/%s", secretNamespace, secretName))
} else {
logger.Debug("Velero credentials secret data unchanged, skipping update", "secret", secretName)
}
} else if err != nil {
return fmt.Errorf("error checking for existing Velero credentials secret: %w", err)
}
return nil
}
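// Illustrative only: with an assumed endpoint of "https://s3.example.com", the
// rendered "cloud" key above is AWS shared-config syntax with a
// service-specific endpoint override:
//
//	[default]
//	aws_access_key_id=AKIAEXAMPLE
//	aws_secret_access_key=secret
//	services = seaweed-s3
//	[services seaweed-s3]
//	s3 =
//	endpoint_url = https://s3.example.com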
// createBackupStorageLocation creates or updates a Velero BackupStorageLocation resource.
func (r *BackupJobReconciler) createBackupStorageLocation(ctx context.Context, bsl *velerov1.BackupStorageLocation) error {
logger := getLogger(ctx)
foundBSL := &velerov1.BackupStorageLocation{}
bslKey := client.ObjectKey{Name: bsl.Name, Namespace: bsl.Namespace}
err := r.Get(ctx, bslKey, foundBSL)
if err != nil && errors.IsNotFound(err) {
// Create the BackupStorageLocation
if err := r.Create(ctx, bsl); err != nil {
return fmt.Errorf("failed to create BackupStorageLocation: %w", err)
}
logger.Debug("created BackupStorageLocation", "name", bsl.Name, "namespace", bsl.Namespace)
} else if err == nil {
// Update if necessary - use patch to avoid conflicts with Velero's status updates
// Only update if the spec has actually changed
if !reflect.DeepEqual(foundBSL.Spec, bsl.Spec) {
// Retry on conflict since Velero may be updating status concurrently
for i := 0; i < 3; i++ {
if err := r.Get(ctx, bslKey, foundBSL); err != nil {
return fmt.Errorf("failed to get BackupStorageLocation for update: %w", err)
}
foundBSL.Spec = bsl.Spec
if err := r.Update(ctx, foundBSL); err != nil {
if errors.IsConflict(err) && i < 2 {
logger.Debug("conflict updating BackupStorageLocation, retrying", "attempt", i+1)
time.Sleep(100 * time.Millisecond)
continue
}
return fmt.Errorf("failed to update BackupStorageLocation: %w", err)
}
logger.Debug("updated BackupStorageLocation", "name", bsl.Name, "namespace", bsl.Namespace)
return nil
}
} else {
logger.Debug("BackupStorageLocation spec unchanged, skipping update", "name", bsl.Name, "namespace", bsl.Namespace)
}
} else if err != nil {
return fmt.Errorf("error checking for existing BackupStorageLocation: %w", err)
}
return nil
}
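// Note: the hand-rolled conflict retry above could equivalently use
// retry.RetryOnConflict from k8s.io/client-go/util/retry; a sketch, not wired
// in here:
//
//	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
//		if err := r.Get(ctx, bslKey, foundBSL); err != nil {
//			return err
//		}
//		foundBSL.Spec = bsl.Spec
//		return r.Update(ctx, foundBSL)
//	})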
// createVolumeSnapshotLocation creates or updates a Velero VolumeSnapshotLocation resource.
func (r *BackupJobReconciler) createVolumeSnapshotLocation(ctx context.Context, vsl *velerov1.VolumeSnapshotLocation) error {
logger := getLogger(ctx)
foundVSL := &velerov1.VolumeSnapshotLocation{}
vslKey := client.ObjectKey{Name: vsl.Name, Namespace: vsl.Namespace}
err := r.Get(ctx, vslKey, foundVSL)
if err != nil && errors.IsNotFound(err) {
// Create the VolumeSnapshotLocation
if err := r.Create(ctx, vsl); err != nil {
return fmt.Errorf("failed to create VolumeSnapshotLocation: %w", err)
}
logger.Debug("created VolumeSnapshotLocation", "name", vsl.Name, "namespace", vsl.Namespace)
} else if err == nil {
// Update if necessary - only update if the spec has actually changed
if !reflect.DeepEqual(foundVSL.Spec, vsl.Spec) {
// Retry on conflict since Velero may be updating status concurrently
for i := 0; i < 3; i++ {
if err := r.Get(ctx, vslKey, foundVSL); err != nil {
return fmt.Errorf("failed to get VolumeSnapshotLocation for update: %w", err)
}
foundVSL.Spec = vsl.Spec
if err := r.Update(ctx, foundVSL); err != nil {
if errors.IsConflict(err) && i < 2 {
logger.Debug("conflict updating VolumeSnapshotLocation, retrying", "attempt", i+1)
time.Sleep(100 * time.Millisecond)
continue
}
return fmt.Errorf("failed to update VolumeSnapshotLocation: %w", err)
}
logger.Debug("updated VolumeSnapshotLocation", "name", vsl.Name, "namespace", vsl.Namespace)
return nil
}
} else {
logger.Debug("VolumeSnapshotLocation spec unchanged, skipping update", "name", vsl.Name, "namespace", vsl.Namespace)
}
} else if err != nil {
return fmt.Errorf("error checking for existing VolumeSnapshotLocation: %w", err)
}
return nil
}
func (r *BackupJobReconciler) markBackupJobFailed(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, message string) (ctrl.Result, error) {
logger := getLogger(ctx)
now := metav1.Now()
backupJob.Status.CompletedAt = &now
backupJob.Status.Phase = backupsv1alpha1.BackupJobPhaseFailed
backupJob.Status.Message = message
// Add condition
backupJob.Status.Conditions = append(backupJob.Status.Conditions, metav1.Condition{
Type: "Ready",
Status: metav1.ConditionFalse,
Reason: "BackupFailed",
Message: message,
LastTransitionTime: now,
})
if err := r.Status().Update(ctx, backupJob); err != nil {
logger.Error(err, "failed to update BackupJob status to Failed")
return ctrl.Result{}, err
}
logger.Debug("BackupJob failed", "message", message)
return ctrl.Result{}, nil
}
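// Note: appending a "Ready" condition unconditionally can accumulate duplicate
// entries across reconciles; meta.SetStatusCondition from
// k8s.io/apimachinery/pkg/api/meta deduplicates by Type and manages
// LastTransitionTime. A sketch, not wired in here:
//
//	meta.SetStatusCondition(&backupJob.Status.Conditions, metav1.Condition{
//		Type:    "Ready",
//		Status:  metav1.ConditionFalse,
//		Reason:  "BackupFailed",
//		Message: message,
//	})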
func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, strategy *strategyv1alpha1.Velero) error {
logger := getLogger(ctx)
logger.Debug("createVeleroBackup called", "strategy", strategy.Name)
@@ -225,12 +529,7 @@ func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob
return err
}
templateContext := map[string]interface{}{
"Application": app.Object,
"Parameters": resolved.Parameters,
}
veleroBackupSpec, err := template.Template(&strategy.Spec.Template.Spec, templateContext)
veleroBackupSpec, err := template.Template(&strategy.Spec.Template.Spec, app.Object)
if err != nil {
return err
}
@@ -262,8 +561,13 @@ func (r *BackupJobReconciler) createVeleroBackup(ctx context.Context, backupJob
return nil
}
func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, veleroBackup *velerov1.Backup, resolved *ResolvedBackupConfig) (*backupsv1alpha1.Backup, error) {
func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJob *backupsv1alpha1.BackupJob, veleroBackup *velerov1.Backup) (*backupsv1alpha1.Backup, error) {
logger := getLogger(ctx)
// Extract artifact information from Velero Backup
// Create a basic artifact referencing the Velero backup
artifact := &backupsv1alpha1.BackupArtifact{
URI: fmt.Sprintf("velero://%s/%s", backupJob.Namespace, veleroBackup.Name),
}
// Get takenAt from Velero Backup creation timestamp or status
takenAt := metav1.Now()
@@ -275,18 +579,13 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
// Extract driver metadata (e.g., Velero backup name)
driverMetadata := map[string]string{
veleroBackupNameMetadataKey: veleroBackup.Name,
veleroBackupNamespaceMetadataKey: veleroBackup.Namespace,
}
// Create a basic artifact referencing the Velero backup
artifact := &backupsv1alpha1.BackupArtifact{
URI: fmt.Sprintf("velero://%s/%s", veleroBackup.Namespace, veleroBackup.Name),
"velero.io/backup-name": veleroBackup.Name,
"velero.io/backup-namespace": veleroBackup.Namespace,
}
backup := &backupsv1alpha1.Backup{
ObjectMeta: metav1.ObjectMeta{
Name: backupJob.Name,
Name: fmt.Sprintf("%s", backupJob.Name),
Namespace: backupJob.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
@@ -300,13 +599,13 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
},
Spec: backupsv1alpha1.BackupSpec{
ApplicationRef: backupJob.Spec.ApplicationRef,
StrategyRef: resolved.StrategyRef,
StorageRef: backupJob.Spec.StorageRef,
StrategyRef: backupJob.Spec.StrategyRef,
TakenAt: takenAt,
DriverMetadata: driverMetadata,
},
Status: backupsv1alpha1.BackupStatus{
Phase: backupsv1alpha1.BackupPhaseReady,
Artifact: artifact,
Phase: backupsv1alpha1.BackupPhaseReady,
},
}
@@ -314,6 +613,10 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
backup.Spec.PlanRef = backupJob.Spec.PlanRef
}
if artifact != nil {
backup.Status.Artifact = artifact
}
if err := r.Create(ctx, backup); err != nil {
logger.Error(err, "failed to create Backup resource")
return nil, err
@@ -322,178 +625,3 @@ func (r *BackupJobReconciler) createBackupResource(ctx context.Context, backupJo
logger.Debug("created Backup resource", "name", backup.Name)
return backup, nil
}
// reconcileVeleroRestore handles restore operations for Velero strategy.
func (r *RestoreJobReconciler) reconcileVeleroRestore(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup) (ctrl.Result, error) {
logger := getLogger(ctx)
logger.Debug("reconciling Velero strategy restore", "restorejob", restoreJob.Name, "backup", backup.Name)
// Step 1: On first reconcile, set startedAt and phase = Running
if restoreJob.Status.StartedAt == nil {
logger.Debug("setting RestoreJob StartedAt and phase to Running")
now := metav1.Now()
restoreJob.Status.StartedAt = &now
restoreJob.Status.Phase = backupsv1alpha1.RestoreJobPhaseRunning
if err := r.Status().Update(ctx, restoreJob); err != nil {
logger.Error(err, "failed to update RestoreJob status")
return ctrl.Result{}, err
}
return ctrl.Result{RequeueAfter: defaultRestoreRequeueAfter}, nil
}
// Step 2: Resolve inputs - Read Strategy, Storage, target Application
logger.Debug("fetching Velero strategy", "strategyName", backup.Spec.StrategyRef.Name)
veleroStrategy := &strategyv1alpha1.Velero{}
if err := r.Get(ctx, client.ObjectKey{Name: backup.Spec.StrategyRef.Name}, veleroStrategy); err != nil {
if errors.IsNotFound(err) {
logger.Error(err, "Velero strategy not found", "strategyName", backup.Spec.StrategyRef.Name)
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("Velero strategy not found: %s", backup.Spec.StrategyRef.Name))
}
logger.Error(err, "failed to get Velero strategy")
return ctrl.Result{}, err
}
logger.Debug("fetched Velero strategy", "strategyName", veleroStrategy.Name)
// Get Velero backup name from Backup's driverMetadata
veleroBackupName, ok := backup.Spec.DriverMetadata[veleroBackupNameMetadataKey]
if !ok {
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("Backup missing Velero backup name in driverMetadata (key: %s)", veleroBackupNameMetadataKey))
}
// Step 3: Execute restore logic
// Check if we already created a Velero Restore
logger.Debug("checking for existing Velero Restore", "namespace", veleroNamespace)
veleroRestoreList := &velerov1.RestoreList{}
opts := []client.ListOption{
client.InNamespace(veleroNamespace),
client.MatchingLabels{
backupsv1alpha1.OwningJobNameLabel: restoreJob.Name,
backupsv1alpha1.OwningJobNamespaceLabel: restoreJob.Namespace,
},
}
if err := r.List(ctx, veleroRestoreList, opts...); err != nil {
logger.Error(err, "failed to get Velero Restore")
return ctrl.Result{}, err
}
if len(veleroRestoreList.Items) == 0 {
// Create Velero Restore
logger.Debug("Velero Restore not found, creating new one")
if err := r.createVeleroRestore(ctx, restoreJob, backup, veleroStrategy, veleroBackupName); err != nil {
logger.Error(err, "failed to create Velero Restore")
return r.markRestoreJobFailed(ctx, restoreJob, fmt.Sprintf("failed to create Velero Restore: %v", err))
}
logger.Debug("created Velero Restore, requeuing")
// Requeue to check status
return ctrl.Result{RequeueAfter: defaultRestoreRequeueAfter}, nil
}
if len(veleroRestoreList.Items) > 1 {
logger.Error(fmt.Errorf("too many Velero restores for RestoreJob"), "found more than one Velero Restore referencing a single RestoreJob as owner")
return r.markRestoreJobFailed(ctx, restoreJob, "found multiple Velero Restores for this RestoreJob")
}
veleroRestore := veleroRestoreList.Items[0].DeepCopy()
logger.Debug("found existing Velero Restore", "phase", veleroRestore.Status.Phase)
// Check Velero Restore status
phase := string(veleroRestore.Status.Phase)
if phase == "" {
// Still in progress, requeue
return ctrl.Result{RequeueAfter: defaultActiveRestorePollingInterval}, nil
}
// Step 4: On success
if phase == "Completed" {
now := metav1.Now()
restoreJob.Status.CompletedAt = &now
restoreJob.Status.Phase = backupsv1alpha1.RestoreJobPhaseSucceeded
if err := r.Status().Update(ctx, restoreJob); err != nil {
logger.Error(err, "failed to update RestoreJob status")
return ctrl.Result{}, err
}
logger.Debug("RestoreJob succeeded")
return ctrl.Result{}, nil
}
// Step 5: On failure
if phase == "Failed" || phase == "PartiallyFailed" {
message := fmt.Sprintf("Velero Restore failed with phase: %s", phase)
if veleroRestore.Status.FailureReason != "" {
message = fmt.Sprintf("%s: %s", message, veleroRestore.Status.FailureReason)
}
return r.markRestoreJobFailed(ctx, restoreJob, message)
}
// Still in progress (InProgress, New, etc.)
return ctrl.Result{RequeueAfter: defaultRestoreRequeueAfter}, nil
}
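// Phase handling above, summarized (derived from the checks in this function):
//
//	""                         -> requeue after defaultActiveRestorePollingInterval
//	"Completed"                -> RestoreJobPhaseSucceeded
//	"Failed"/"PartiallyFailed" -> RestoreJobPhaseFailed
//	anything else (InProgress) -> requeue after defaultRestoreRequeueAfter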
// createVeleroRestore creates a Velero Restore resource.
func (r *RestoreJobReconciler) createVeleroRestore(ctx context.Context, restoreJob *backupsv1alpha1.RestoreJob, backup *backupsv1alpha1.Backup, strategy *strategyv1alpha1.Velero, veleroBackupName string) error {
logger := getLogger(ctx)
logger.Debug("createVeleroRestore called", "strategy", strategy.Name, "veleroBackupName", veleroBackupName)
// Determine target application reference
targetAppRef := r.getTargetApplicationRef(restoreJob, backup)
// Get the target application object for templating
mapping, err := r.RESTMapping(schema.GroupKind{Group: *targetAppRef.APIGroup, Kind: targetAppRef.Kind})
if err != nil {
return fmt.Errorf("failed to get REST mapping for target application: %w", err)
}
ns := restoreJob.Namespace
if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
ns = ""
}
app, err := r.Resource(mapping.Resource).Namespace(ns).Get(ctx, targetAppRef.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get target application: %w", err)
}
// Build template context
templateContext := map[string]interface{}{
"Application": app.Object,
// TODO: Parameters are not currently stored on Backup, so they're unavailable during restore.
// This is a design limitation that should be addressed by persisting Parameters on the Backup object.
"Parameters": map[string]string{},
}
// Template the restore spec from the strategy, or use defaults if not specified
var veleroRestoreSpec velerov1.RestoreSpec
if strategy.Spec.Template.RestoreSpec != nil {
templatedSpec, err := template.Template(strategy.Spec.Template.RestoreSpec, templateContext)
if err != nil {
return fmt.Errorf("failed to template Velero Restore spec: %w", err)
}
veleroRestoreSpec = *templatedSpec
}
// Set the backupName in the spec (required by Velero)
veleroRestoreSpec.BackupName = veleroBackupName
generateName := fmt.Sprintf("%s.%s-", restoreJob.Namespace, restoreJob.Name)
veleroRestore := &velerov1.Restore{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
Namespace: veleroNamespace,
Labels: map[string]string{
backupsv1alpha1.OwningJobNameLabel: restoreJob.Name,
backupsv1alpha1.OwningJobNamespaceLabel: restoreJob.Namespace,
},
},
Spec: veleroRestoreSpec,
}
if err := r.Create(ctx, veleroRestore); err != nil {
logger.Error(err, "failed to create Velero Restore", "generateName", generateName)
r.Recorder.Event(restoreJob, corev1.EventTypeWarning, "VeleroRestoreCreationFailed",
fmt.Sprintf("Failed to create Velero Restore %s/%s: %v", veleroNamespace, generateName, err))
return err
}
logger.Debug("created Velero Restore", "name", veleroRestore.Name, "namespace", veleroRestore.Namespace)
r.Recorder.Event(restoreJob, corev1.EventTypeNormal, "VeleroRestoreCreated",
fmt.Sprintf("Created Velero Restore %s/%s", veleroNamespace, veleroRestore.Name))
return nil
}
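// Illustrative only: for a RestoreJob "nightly" in namespace "tenant-a", the
// GenerateName above yields Velero Restore names such as
// "tenant-a.nightly-x7k2p" inside veleroNamespace.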

View File

@@ -1,208 +0,0 @@
package backupcontroller
import (
"context"
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
clientfake "sigs.k8s.io/controller-runtime/pkg/client/fake"
strategyv1alpha1 "github.com/cozystack/cozystack/api/backups/strategy/v1alpha1"
backupsv1alpha1 "github.com/cozystack/cozystack/api/backups/v1alpha1"
velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)
// mockRESTMapper implements meta.RESTMapper for testing
type mockRESTMapper struct {
mapping *meta.RESTMapping
}
func (m *mockRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
return m.mapping, nil
}
func (m *mockRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
return []*meta.RESTMapping{m.mapping}, nil
}
func (m *mockRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
return m.mapping.GroupVersionKind, nil
}
func (m *mockRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
return []schema.GroupVersionKind{m.mapping.GroupVersionKind}, nil
}
func (m *mockRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
return m.mapping.Resource, nil
}
func (m *mockRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
return []schema.GroupVersionResource{m.mapping.Resource}, nil
}
func (m *mockRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
return resource, nil
}
func TestCreateVeleroBackup_TemplateContext(t *testing.T) {
// Setup scheme
testScheme := runtime.NewScheme()
_ = scheme.AddToScheme(testScheme)
_ = backupsv1alpha1.AddToScheme(testScheme)
_ = strategyv1alpha1.AddToScheme(testScheme)
_ = velerov1.AddToScheme(testScheme)
// Create test application (VirtualMachine-like object)
testApp := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-vm",
Namespace: "default",
Labels: map[string]string{
"apps.cozystack.io/application.Kind": "VirtualMachine",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "test-container",
Image: "test-image:latest",
},
},
},
}
// Create dynamic client with the test application
dynamicClient := dynamicfake.NewSimpleDynamicClient(testScheme, testApp)
// Create REST mapping
mapping := &meta.RESTMapping{
Resource: schema.GroupVersionResource{
Group: "",
Version: "v1",
Resource: "pods",
},
GroupVersionKind: schema.GroupVersionKind{
Group: "",
Version: "v1",
Kind: "Pod",
},
Scope: meta.RESTScopeNamespace,
}
// Create BackupJob
backupJob := &backupsv1alpha1.BackupJob{
ObjectMeta: metav1.ObjectMeta{
Name: "test-backup-job",
Namespace: "default",
},
Spec: backupsv1alpha1.BackupJobSpec{
ApplicationRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr(""),
Kind: "Pod",
Name: "test-vm",
},
BackupClassName: "velero",
},
}
// Create Velero strategy with template that uses Application and Parameters
veleroStrategy := &strategyv1alpha1.Velero{
ObjectMeta: metav1.ObjectMeta{
Name: "velero-strategy",
},
Spec: strategyv1alpha1.VeleroSpec{
Template: strategyv1alpha1.VeleroTemplate{
Spec: velerov1.BackupSpec{
// Use template variables to verify context is passed correctly
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "{{ .Application.metadata.name }}",
},
},
// Use Parameters in template
StorageLocation: "{{ .Parameters.backupStorageLocationName }}",
},
},
},
}
// Create ResolvedBackupConfig with parameters
resolved := &ResolvedBackupConfig{
StrategyRef: corev1.TypedLocalObjectReference{
APIGroup: stringPtr("strategy.backups.cozystack.io"),
Kind: "Velero",
Name: "velero-strategy",
},
Parameters: map[string]string{
"backupStorageLocationName": "default-storage",
},
}
// Create fake client for controller
fakeClient := clientfake.NewClientBuilder().
WithScheme(testScheme).
WithObjects(backupJob, veleroStrategy).
Build()
// Create reconciler with fake event recorder
reconciler := &BackupJobReconciler{
Client: fakeClient,
Interface: dynamicClient,
RESTMapper: &mockRESTMapper{mapping: mapping},
Scheme: testScheme,
Recorder: record.NewFakeRecorder(10),
}
// Create context
ctx := context.Background()
// Call createVeleroBackup
err := reconciler.createVeleroBackup(ctx, backupJob, veleroStrategy, resolved)
if err != nil {
t.Fatalf("createVeleroBackup() error = %v", err)
}
// Verify that the template was executed correctly by checking the created Velero Backup
// The template should have replaced {{ .Application.metadata.name }} with "test-vm"
// and {{ .Parameters.backupStorageLocationName }} with "default-storage"
// Get the created Velero Backup
veleroBackups := &velerov1.BackupList{}
err = fakeClient.List(ctx, veleroBackups, client.InNamespace(veleroNamespace))
if err != nil {
t.Fatalf("Failed to list Velero Backups: %v", err)
}
if len(veleroBackups.Items) == 0 {
t.Fatal("Expected Velero Backup to be created, but none found")
}
veleroBackup := veleroBackups.Items[0]
// Verify template context was used correctly:
// 1. Application.metadata.name should be replaced with "test-vm"
if veleroBackup.Spec.LabelSelector == nil {
t.Fatal("Expected LabelSelector to be set by template")
}
if appLabel, ok := veleroBackup.Spec.LabelSelector.MatchLabels["app"]; ok {
if appLabel != "test-vm" {
t.Errorf("Template context Application.metadata.name not applied correctly. Expected 'test-vm', got '%s'", appLabel)
}
} else {
t.Error("Template context Application.metadata.name not found in label selector")
}
// 2. Parameters.backupStorageLocationName should be replaced with "default-storage"
if veleroBackup.Spec.StorageLocation != "default-storage" {
t.Errorf("Template context Parameters.backupStorageLocationName not applied correctly. Expected 'default-storage', got '%s'", veleroBackup.Spec.StorageLocation)
}
}

View File

@@ -1,188 +0,0 @@
package controller
import (
"context"
"fmt"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// +kubebuilder:rbac:groups=cozystack.io,resources=applicationdefinitions,verbs=get;list;watch
// +kubebuilder:rbac:groups=helm.toolkit.fluxcd.io,resources=helmreleases,verbs=get;list;watch;update;patch
// ApplicationDefinitionHelmReconciler reconciles ApplicationDefinitions
// and updates related HelmReleases when an ApplicationDefinition changes.
// This controller does NOT watch HelmReleases to avoid mutual reconciliation storms
// with Flux's helm-controller.
type ApplicationDefinitionHelmReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *ApplicationDefinitionHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
// Get the ApplicationDefinition that triggered this reconciliation
appDef := &cozyv1alpha1.ApplicationDefinition{}
if err := r.Get(ctx, req.NamespacedName, appDef); err != nil {
logger.Error(err, "failed to get ApplicationDefinition", "name", req.Name)
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Update HelmReleases related to this specific ApplicationDefinition
if err := r.updateHelmReleasesForAppDef(ctx, appDef); err != nil {
logger.Error(err, "failed to update HelmReleases for ApplicationDefinition", "appDef", appDef.Name)
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *ApplicationDefinitionHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Named("applicationdefinition-helm-reconciler").
For(&cozyv1alpha1.ApplicationDefinition{}).
Complete(r)
}
// updateHelmReleasesForAppDef updates all HelmReleases that match the application labels from ApplicationDefinition
func (r *ApplicationDefinitionHelmReconciler) updateHelmReleasesForAppDef(ctx context.Context, appDef *cozyv1alpha1.ApplicationDefinition) error {
logger := log.FromContext(ctx)
// Use application labels to find HelmReleases
// Labels: apps.cozystack.io/application.kind and apps.cozystack.io/application.group
applicationKind := appDef.Spec.Application.Kind
// Validate that applicationKind is non-empty
if applicationKind == "" {
logger.V(4).Info("Skipping HelmRelease update: Application.Kind is empty", "appDef", appDef.Name)
return nil
}
applicationGroup := "apps.cozystack.io" // All applications use this group
// Build label selector for HelmReleases
// Only reconcile HelmReleases with apps.cozystack.io/application.* labels
labelSelector := client.MatchingLabels{
"apps.cozystack.io/application.kind": applicationKind,
"apps.cozystack.io/application.group": applicationGroup,
}
// List all HelmReleases with matching labels
hrList := &helmv2.HelmReleaseList{}
if err := r.List(ctx, hrList, labelSelector); err != nil {
logger.Error(err, "failed to list HelmReleases", "kind", applicationKind, "group", applicationGroup)
return err
}
logger.V(4).Info("Found HelmReleases to update", "appDef", appDef.Name, "kind", applicationKind, "count", len(hrList.Items))
// Update each HelmRelease
for i := range hrList.Items {
hr := &hrList.Items[i]
if err := r.updateHelmReleaseChart(ctx, hr, appDef); err != nil {
logger.Error(err, "failed to update HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
continue
}
}
return nil
}
// expectedValuesFrom returns the expected valuesFrom configuration for HelmReleases
func expectedValuesFrom() []helmv2.ValuesReference {
return []helmv2.ValuesReference{
{
Kind: "Secret",
Name: "cozystack-values",
},
}
}
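// Illustrative only: on the HelmRelease this renders as
//
//	valuesFrom:
//	- kind: Secret
//	  name: cozystack-values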
// valuesFromEqual compares two ValuesReference slices
func valuesFromEqual(a, b []helmv2.ValuesReference) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i].Kind != b[i].Kind ||
a[i].Name != b[i].Name ||
a[i].ValuesKey != b[i].ValuesKey ||
a[i].TargetPath != b[i].TargetPath ||
a[i].Optional != b[i].Optional {
return false
}
}
return true
}
// updateHelmReleaseChart updates the chart and valuesFrom in HelmRelease based on ApplicationDefinition
func (r *ApplicationDefinitionHelmReconciler) updateHelmReleaseChart(ctx context.Context, hr *helmv2.HelmRelease, appDef *cozyv1alpha1.ApplicationDefinition) error {
logger := log.FromContext(ctx)
hrCopy := hr.DeepCopy()
updated := false
// Validate ChartRef configuration exists
if appDef.Spec.Release.ChartRef == nil ||
appDef.Spec.Release.ChartRef.Kind == "" ||
appDef.Spec.Release.ChartRef.Name == "" ||
appDef.Spec.Release.ChartRef.Namespace == "" {
logger.Error(fmt.Errorf("invalid ChartRef in ApplicationDefinition"), "Skipping HelmRelease chartRef update: ChartRef is nil or incomplete",
"appDef", appDef.Name)
return nil
}
// Use ChartRef directly from ApplicationDefinition
expectedChartRef := appDef.Spec.Release.ChartRef
// Check if chartRef needs to be updated
if hrCopy.Spec.ChartRef == nil {
hrCopy.Spec.ChartRef = expectedChartRef
// Clear the old chart field when switching to chartRef
hrCopy.Spec.Chart = nil
updated = true
} else if hrCopy.Spec.ChartRef.Kind != expectedChartRef.Kind ||
hrCopy.Spec.ChartRef.Name != expectedChartRef.Name ||
hrCopy.Spec.ChartRef.Namespace != expectedChartRef.Namespace {
hrCopy.Spec.ChartRef = expectedChartRef
updated = true
}
// Check and update valuesFrom configuration
expected := expectedValuesFrom()
if !valuesFromEqual(hrCopy.Spec.ValuesFrom, expected) {
logger.V(4).Info("Updating HelmRelease valuesFrom", "name", hr.Name, "namespace", hr.Namespace)
hrCopy.Spec.ValuesFrom = expected
updated = true
}
// Check and update labels from ApplicationDefinition
if len(appDef.Spec.Release.Labels) > 0 {
if hrCopy.Labels == nil {
hrCopy.Labels = make(map[string]string)
}
for key, value := range appDef.Spec.Release.Labels {
if hrCopy.Labels[key] != value {
logger.V(4).Info("Updating HelmRelease label", "name", hr.Name, "namespace", hr.Namespace, "label", key, "value", value)
hrCopy.Labels[key] = value
updated = true
}
}
}
if updated {
logger.V(4).Info("Updating HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
if err := r.Update(ctx, hrCopy); err != nil {
return fmt.Errorf("failed to update HelmRelease: %w", err)
}
}
return nil
}

View File

@@ -23,7 +23,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type ApplicationDefinitionReconciler struct {
type CozystackResourceDefinitionReconciler struct {
client.Client
Scheme *runtime.Scheme
@@ -32,23 +32,25 @@ type ApplicationDefinitionReconciler struct {
mu sync.Mutex
lastEvent time.Time
lastHandled time.Time
CozystackAPIKind string
}
func (r *ApplicationDefinitionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
func (r *CozystackResourceDefinitionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// Only handle debounced restart logic
// HelmRelease reconciliation is handled by ApplicationDefinitionHelmReconciler
// HelmRelease reconciliation is handled by CozystackResourceDefinitionHelmReconciler
return r.debouncedRestart(ctx)
}
func (r *ApplicationDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *CozystackResourceDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) error {
if r.Debounce == 0 {
r.Debounce = 5 * time.Second
}
return ctrl.NewControllerManagedBy(mgr).
Named("applicationdefinition-controller").
Named("cozystackresource-controller").
Watches(
&cozyv1alpha1.ApplicationDefinition{},
&cozyv1alpha1.CozystackResourceDefinition{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
r.mu.Lock()
r.lastEvent = time.Now()
@@ -64,22 +66,22 @@ func (r *ApplicationDefinitionReconciler) SetupWithManager(mgr ctrl.Manager) err
Complete(r)
}
type appDefHashView struct {
Name string `json:"name"`
Spec cozyv1alpha1.ApplicationDefinitionSpec `json:"spec"`
type crdHashView struct {
Name string `json:"name"`
Spec cozyv1alpha1.CozystackResourceDefinitionSpec `json:"spec"`
}
func (r *ApplicationDefinitionReconciler) computeConfigHash(ctx context.Context) (string, error) {
list := &cozyv1alpha1.ApplicationDefinitionList{}
func (r *CozystackResourceDefinitionReconciler) computeConfigHash(ctx context.Context) (string, error) {
list := &cozyv1alpha1.CozystackResourceDefinitionList{}
if err := r.List(ctx, list); err != nil {
return "", err
}
slices.SortFunc(list.Items, sortAppDefs)
slices.SortFunc(list.Items, sortCozyRDs)
views := make([]appDefHashView, 0, len(list.Items))
views := make([]crdHashView, 0, len(list.Items))
for i := range list.Items {
views = append(views, appDefHashView{
views = append(views, crdHashView{
Name: list.Items[i].Name,
Spec: list.Items[i].Spec,
})
@@ -92,7 +94,7 @@ func (r *ApplicationDefinitionReconciler) computeConfigHash(ctx context.Context)
return hex.EncodeToString(sum[:]), nil
}
func (r *ApplicationDefinitionReconciler) debouncedRestart(ctx context.Context) (ctrl.Result, error) {
func (r *CozystackResourceDefinitionReconciler) debouncedRestart(ctx context.Context) (ctrl.Result, error) {
logger := log.FromContext(ctx)
r.mu.Lock()
@@ -130,7 +132,7 @@ func (r *ApplicationDefinitionReconciler) debouncedRestart(ctx context.Context)
r.mu.Lock()
r.lastHandled = le
r.mu.Unlock()
logger.Info("No changes in ApplicationDefinition config; skipping restart", "hash", newHash)
logger.Info("No changes in CRD config; skipping restart", "hash", newHash)
return ctrl.Result{}, nil
}
@@ -149,24 +151,34 @@ func (r *ApplicationDefinitionReconciler) debouncedRestart(ctx context.Context)
return ctrl.Result{}, nil
}
func (r *ApplicationDefinitionReconciler) getWorkload(
func (r *CozystackResourceDefinitionReconciler) getWorkload(
ctx context.Context,
key types.NamespacedName,
) (tpl *corev1.PodTemplateSpec, obj client.Object, patch client.Patch, err error) {
dep := &appsv1.Deployment{}
if err := r.Get(ctx, key, dep); err != nil {
return nil, nil, nil, err
if r.CozystackAPIKind == "Deployment" {
dep := &appsv1.Deployment{}
if err := r.Get(ctx, key, dep); err != nil {
return nil, nil, nil, err
}
obj = dep
tpl = &dep.Spec.Template
patch = client.MergeFrom(dep.DeepCopy())
} else {
ds := &appsv1.DaemonSet{}
if err := r.Get(ctx, key, ds); err != nil {
return nil, nil, nil, err
}
obj = ds
tpl = &ds.Spec.Template
patch = client.MergeFrom(ds.DeepCopy())
}
obj = dep
tpl = &dep.Spec.Template
patch = client.MergeFrom(dep.DeepCopy())
if tpl.Annotations == nil {
tpl.Annotations = make(map[string]string)
}
return tpl, obj, patch, nil
}
func sortAppDefs(a, b cozyv1alpha1.ApplicationDefinition) int {
func sortCozyRDs(a, b cozyv1alpha1.CozystackResourceDefinition) int {
if a.Name == b.Name {
return 0
}

View File

@@ -0,0 +1,166 @@
package controller
import (
"context"
"fmt"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// +kubebuilder:rbac:groups=cozystack.io,resources=cozystackresourcedefinitions,verbs=get;list;watch
// +kubebuilder:rbac:groups=helm.toolkit.fluxcd.io,resources=helmreleases,verbs=get;list;watch;update;patch
// CozystackResourceDefinitionHelmReconciler reconciles CozystackResourceDefinitions
// and updates related HelmReleases when a CozyRD changes.
// This controller does NOT watch HelmReleases to avoid mutual reconciliation storms
// with Flux's helm-controller.
type CozystackResourceDefinitionHelmReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *CozystackResourceDefinitionHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
// Get the CozystackResourceDefinition that triggered this reconciliation
crd := &cozyv1alpha1.CozystackResourceDefinition{}
if err := r.Get(ctx, req.NamespacedName, crd); err != nil {
logger.Error(err, "failed to get CozystackResourceDefinition", "name", req.Name)
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Update HelmReleases related to this specific CozyRD
if err := r.updateHelmReleasesForCRD(ctx, crd); err != nil {
logger.Error(err, "failed to update HelmReleases for CRD", "crd", crd.Name)
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *CozystackResourceDefinitionHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
Named("cozystackresourcedefinition-helm-reconciler").
For(&cozyv1alpha1.CozystackResourceDefinition{}).
Complete(r)
}
// updateHelmReleasesForCRD updates all HelmReleases that match the application labels from CozystackResourceDefinition
func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleasesForCRD(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
logger := log.FromContext(ctx)
// Use application labels to find HelmReleases
// Labels: apps.cozystack.io/application.kind and apps.cozystack.io/application.group
applicationKind := crd.Spec.Application.Kind
// Validate that applicationKind is non-empty
if applicationKind == "" {
logger.V(4).Info("Skipping HelmRelease update: Application.Kind is empty", "crd", crd.Name)
return nil
}
applicationGroup := "apps.cozystack.io" // All applications use this group
// Build label selector for HelmReleases
// Only reconcile HelmReleases with cozystack.io/ui=true label
labelSelector := client.MatchingLabels{
"apps.cozystack.io/application.kind": applicationKind,
"apps.cozystack.io/application.group": applicationGroup,
"cozystack.io/ui": "true",
}
// List all HelmReleases with matching labels
hrList := &helmv2.HelmReleaseList{}
if err := r.List(ctx, hrList, labelSelector); err != nil {
logger.Error(err, "failed to list HelmReleases", "kind", applicationKind, "group", applicationGroup)
return err
}
logger.V(4).Info("Found HelmReleases to update", "crd", crd.Name, "kind", applicationKind, "count", len(hrList.Items))
// Update each HelmRelease
for i := range hrList.Items {
hr := &hrList.Items[i]
if err := r.updateHelmReleaseChart(ctx, hr, crd); err != nil {
logger.Error(err, "failed to update HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
continue
}
}
return nil
}
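// Illustrative only: a HelmRelease matched by the selector above would carry
// labels such as (the kind value is assumed for the example):
//
//	apps.cozystack.io/application.kind: VirtualMachine
//	apps.cozystack.io/application.group: apps.cozystack.io
//	cozystack.io/ui: "true"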
// updateHelmReleaseChart updates the chart in HelmRelease based on CozystackResourceDefinition
func (r *CozystackResourceDefinitionHelmReconciler) updateHelmReleaseChart(ctx context.Context, hr *helmv2.HelmRelease, crd *cozyv1alpha1.CozystackResourceDefinition) error {
logger := log.FromContext(ctx)
hrCopy := hr.DeepCopy()
updated := false
// Validate Chart configuration exists
if crd.Spec.Release.Chart.Name == "" {
logger.V(4).Info("Skipping HelmRelease chart update: Chart.Name is empty", "crd", crd.Name)
return nil
}
// Validate SourceRef fields
if crd.Spec.Release.Chart.SourceRef.Kind == "" ||
crd.Spec.Release.Chart.SourceRef.Name == "" ||
crd.Spec.Release.Chart.SourceRef.Namespace == "" {
logger.Error(fmt.Errorf("invalid SourceRef in CRD"), "Skipping HelmRelease chart update: SourceRef fields are incomplete",
"crd", crd.Name,
"kind", crd.Spec.Release.Chart.SourceRef.Kind,
"name", crd.Spec.Release.Chart.SourceRef.Name,
"namespace", crd.Spec.Release.Chart.SourceRef.Namespace)
return nil
}
// Get version and reconcileStrategy from CRD or use defaults
version := ">= 0.0.0-0"
reconcileStrategy := "Revision"
// TODO: Add Version and ReconcileStrategy fields to CozystackResourceDefinitionChart if needed
// Build expected SourceRef
expectedSourceRef := helmv2.CrossNamespaceObjectReference{
Kind: crd.Spec.Release.Chart.SourceRef.Kind,
Name: crd.Spec.Release.Chart.SourceRef.Name,
Namespace: crd.Spec.Release.Chart.SourceRef.Namespace,
}
if hrCopy.Spec.Chart == nil {
// Need to create Chart spec
hrCopy.Spec.Chart = &helmv2.HelmChartTemplate{
Spec: helmv2.HelmChartTemplateSpec{
Chart: crd.Spec.Release.Chart.Name,
Version: version,
ReconcileStrategy: reconcileStrategy,
SourceRef: expectedSourceRef,
},
}
updated = true
} else {
// Update existing Chart spec
if hrCopy.Spec.Chart.Spec.Chart != crd.Spec.Release.Chart.Name ||
hrCopy.Spec.Chart.Spec.SourceRef != expectedSourceRef {
hrCopy.Spec.Chart.Spec.Chart = crd.Spec.Release.Chart.Name
hrCopy.Spec.Chart.Spec.SourceRef = expectedSourceRef
updated = true
}
}
if updated {
logger.V(4).Info("Updating HelmRelease chart", "name", hr.Name, "namespace", hr.Namespace)
if err := r.Update(ctx, hrCopy); err != nil {
return fmt.Errorf("failed to update HelmRelease: %w", err)
}
}
return nil
}

View File

@@ -1,355 +0,0 @@
# Dashboard Resource Integration Guide
This guide explains how to add a new Kubernetes resource to the Cozystack dashboard. The dashboard provides a unified interface for viewing and managing Kubernetes resources through custom table views, detail pages, and sidebar navigation.
## Overview
Adding a new resource to the dashboard requires three main components:
1. **CustomColumnsOverride**: Defines how the resource appears in list/table views
2. **Factory**: Defines the detail page layout for individual resource instances
3. **Sidebar Entry**: Adds navigation to the resource in the sidebar menu
## Prerequisites
- The resource must have a Kubernetes CustomResourceDefinition (CRD) or be a built-in Kubernetes resource
- Know the resource's API group, version, kind, and plural name
- Understand the resource's spec structure to display relevant fields
## Step-by-Step Guide
### Step 1: Add CustomColumnsOverride
The CustomColumnsOverride defines the columns shown in the resource list table and how clicking on a row navigates to the detail page.
**Location**: `internal/controller/dashboard/static_refactored.go` in `CreateAllCustomColumnsOverrides()`
**Example**:
```go
// Stock namespace backups cozystack io v1alpha1 plans
createCustomColumnsOverride("stock-namespace-/backups.cozystack.io/v1alpha1/plans", []any{
createCustomColumnWithJsonPath("Name", ".metadata.name", "Plan", "", "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/plan-details/{reqsJsonPath[0]['.metadata.name']['-']}"),
createApplicationRefColumn("Application"),
createTimestampColumn("Created", ".metadata.creationTimestamp"),
}),
```
**Key Components**:
- **ID Format**: `stock-namespace-/{group}/{version}/{plural}`
- **Name Column**: Use `createCustomColumnWithJsonPath()` with:
- Badge value: Resource kind in PascalCase (e.g., "Plan", "Service")
- Link href: `/openapi-ui/{2}/{namespace}/factory/{resource-details}/{name}`
- **Additional Columns**: Use helper functions like:
- `createStringColumn()`: Simple string values
- `createTimestampColumn()`: Timestamp fields
- `createReadyColumn()`: Ready status from conditions
- Custom helpers for complex fields
**Helper Functions Available**:
- `createCustomColumnWithJsonPath(name, jsonPath, badgeValue, badgeColor, linkHref)`: Column with badge and link
- `createStringColumn(name, jsonPath)`: Simple string column
- `createTimestampColumn(name, jsonPath)`: Timestamp with formatting
- `createReadyColumn()`: Ready status column
- `createBoolColumn(name, jsonPath)`: Boolean column
- `createArrayColumn(name, jsonPath)`: Array column
### Step 2: Add Factory (Detail Page)
The Factory defines the detail page layout when viewing an individual resource instance.
**Location**: `internal/controller/dashboard/static_refactored.go` in `CreateAllFactories()`
**Using Unified Factory Approach** (Recommended):
```go
// Resource details factory using unified approach
resourceConfig := UnifiedResourceConfig{
Name: "resource-details", // Must match the href in CustomColumnsOverride
ResourceType: "factory",
Kind: "ResourceKind", // PascalCase
Plural: "resources", // lowercase plural
Title: "resource", // lowercase singular
}
resourceTabs := []any{
map[string]any{
"key": "details",
"label": "Details",
"children": []any{
contentCard("details-card", map[string]any{
"marginBottom": "24px",
}, []any{
antdText("details-title", true, "Resource details", map[string]any{
"fontSize": 20,
"marginBottom": "12px",
}),
spacer("details-spacer", 16),
antdRow("details-grid", []any{48, 12}, []any{
antdCol("col-left", 12, []any{
antdFlexVertical("col-left-stack", 24, []any{
// Metadata fields: Name, Namespace, Created, etc.
}),
}),
antdCol("col-right", 12, []any{
antdFlexVertical("col-right-stack", 24, []any{
// Spec fields
}),
}),
}),
}),
},
},
}
resourceSpec := createUnifiedFactory(resourceConfig, resourceTabs, []any{"/api/clusters/{2}/k8s/apis/{group}/{version}/namespaces/{3}/{plural}/{6}"})
// Add to return list
return []*dashboardv1alpha1.Factory{
// ... other factories
createFactory("resource-details", resourceSpec),
}
```
**Key Components**:
- **Factory Key**: Must match the href path segment (e.g., `plan-details` matches `/factory/plan-details/...`)
- **API Endpoint**: `/api/clusters/{2}/k8s/apis/{group}/{version}/namespaces/{3}/{plural}/{6}` (concrete example after this list)
- `{2}`: Cluster name
- `{3}`: Namespace
- `{6}`: Resource name
- **Sidebar Tags**: Automatically set to `["{lowercase-kind}-sidebar"]` by `createUnifiedFactory()`
- **Tabs**: Define the detail page content (Details, YAML, etc.)
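For example, assuming the Plan resource (`backups.cozystack.io/v1alpha1/plans`), the endpoint would be `/api/clusters/{2}/k8s/apis/backups.cozystack.io/v1alpha1/namespaces/{3}/plans/{6}`.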
**UI Helper Functions**:
- `contentCard(id, style, children)`: Container card
- `antdText(id, strong, text, style)`: Text element
- `antdFlexVertical(id, gap, children)`: Vertical flex container
- `antdRow(id, gutter, children)`: Row layout
- `antdCol(id, span, children)`: Column layout
- `parsedText(id, text, style)`: Text with JSON path parsing
- `parsedTextWithFormatter(id, text, formatter)`: Formatted text (e.g., timestamp)
- `spacer(id, space)`: Spacing element
**Displaying Fields**:
```go
antdFlexVertical("field-block", 4, []any{
antdText("field-label", true, "Field Label", nil),
parsedText("field-value", "{reqsJsonPath[0]['.spec.fieldName']['-']}", nil),
}),
```
### Step 3: Add Sidebar Entry
The sidebar entry adds navigation to the resource in the sidebar menu.
**Location**: `internal/controller/dashboard/sidebar.go` in `ensureSidebar()`
**3a. Add to keysAndTags** (around line 110-116):
```go
// Add sidebar for {group} {kind} resource
keysAndTags["{plural}"] = []any{"{lowercase-kind}-sidebar"}
```
**3b. Add Sidebar Section** (if creating a new section, around line 169):
```go
// Add hardcoded {SectionName} section
menuItems = append(menuItems, map[string]any{
"key": "{section-key}",
"label": "{SectionName}",
"children": []any{
map[string]any{
"key": "{plural}",
"label": "{ResourceLabel}",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/{group}/{version}/{plural}",
},
},
})
```
**3c. Add Sidebar ID to targetIDs** (around line 220):
```go
"stock-project-factory-{lowercase-kind}-details",
```
**3d. Update Category Ordering** (if adding a new section):
- Update the comment around line 29 to include the new section
- Update `orderCategoryLabels()` function if needed
- Add skip condition in the category loop (around line 149)
**Important Notes**:
- The sidebar tag (`{lowercase-kind}-sidebar`) must match what the Factory uses
- The link format: `/openapi-ui/{clusterName}/{namespace}/api-table/{group}/{version}/{plural}`
- All sidebars share the same `keysAndTags` and `menuItems`, so changes affect all sidebar instances
### Step 4: Verify Integration
1. **Check Factory-Sidebar Connection**:
- Factory uses `sidebarTags: ["{lowercase-kind}-sidebar"]`
- Sidebar has `keysAndTags["{plural}"] = []any{"{lowercase-kind}-sidebar"}`
- Sidebar ID `stock-project-factory-{lowercase-kind}-details` exists in `targetIDs`
2. **Check Navigation Flow**:
- Sidebar link → List table (CustomColumnsOverride)
- List table Name column → Detail page (Factory)
- All paths use consistent naming
3. **Test**:
- Verify the resource appears in the sidebar
- Verify the list table displays correctly
- Verify clicking a resource navigates to the detail page
- Verify the detail page displays all relevant fields
## Common Patterns
### Displaying Object References
For fields that reference other resources (e.g., `applicationRef`, `storageRef`):
```go
// In CustomColumnsOverride
createApplicationRefColumn("Application"), // Uses helper function
// In Factory details tab
parsedText("application-ref-value",
"{reqsJsonPath[0]['.spec.applicationRef.kind']['-']}.{reqsJsonPath[0]['.spec.applicationRef.apiGroup']['-']}/{reqsJsonPath[0]['.spec.applicationRef.name']['-']}",
nil),
```
### Displaying Timestamps
```go
antdFlexVertical("created-block", 4, []any{
antdText("time-label", true, "Created", nil),
antdFlex("time-block", 6, []any{
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "time-icon",
"text": "🌐",
},
},
map[string]any{
"type": "parsedText",
"data": map[string]any{
"formatter": "timestamp",
"id": "time-value",
"text": "{reqsJsonPath[0]['.metadata.creationTimestamp']['-']}",
},
},
}),
}),
```
### Displaying Namespace with Link
```go
antdFlexVertical("meta-namespace-block", 8, []any{
antdText("meta-namespace-label", true, "Namespace", nil),
antdFlex("header-row", 6, []any{
// Badge component
map[string]any{
"type": "antdText",
"data": map[string]any{
"id": "header-badge",
"text": "NS",
"title": "namespace",
"style": map[string]any{
"backgroundColor": "#a25792ff",
// ... badge styles
},
},
},
// Link component
map[string]any{
"type": "antdLink",
"data": map[string]any{
"id": "namespace-link",
"text": "{reqsJsonPath[0]['.metadata.namespace']['-']}",
"href": "/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/marketplace",
},
},
}),
}),
```
## File Reference
- **CustomColumnsOverride**: `internal/controller/dashboard/static_refactored.go` → `CreateAllCustomColumnsOverrides()`
- **Factory**: `internal/controller/dashboard/static_refactored.go` → `CreateAllFactories()`
- **Sidebar**: `internal/controller/dashboard/sidebar.go` → `ensureSidebar()`
- **Helper Functions**: `internal/controller/dashboard/static_helpers.go`
- **UI Helpers**: `internal/controller/dashboard/ui_helpers.go`
- **Unified Helpers**: `internal/controller/dashboard/unified_helpers.go`
## AI Agent Prompt Template
Use this template when asking an AI agent to add a new resource to the dashboard:
```
Please add support for the {ResourceKind} resource ({group}/{version}/{plural}) to the Cozystack dashboard.
Resource Details:
- API Group: {group}
- Version: {version}
- Kind: {ResourceKind}
- Plural: {plural}
- Namespaced: {true/false}
Requirements:
1. Add a CustomColumnsOverride in CreateAllCustomColumnsOverrides() with:
- ID: stock-namespace-/{group}/{version}/{plural}
- Name column with {ResourceKind} badge linking to /factory/{lowercase-kind}-details/{name}
- Additional columns: {list relevant columns}
2. Add a Factory in CreateAllFactories() with:
- Key: {lowercase-kind}-details
- API endpoint: /api/clusters/{2}/k8s/apis/{group}/{version}/namespaces/{3}/{plural}/{6}
- Details tab showing all spec fields:
{list spec fields to display}
- Use createUnifiedFactory() approach
3. Add sidebar entry in ensureSidebar():
- Add keysAndTags entry: keysAndTags["{plural}"] = []any{"{lowercase-kind}-sidebar"}
- Add sidebar section: {specify section name or "add to existing section"}
- Add to targetIDs: "stock-project-factory-{lowercase-kind}-details"
4. Ensure Factory sidebarTags matches the keysAndTags entry
Please follow the existing patterns in the codebase, particularly the Plan resource implementation as a reference.
```
## Example: Plan Resource
The Plan resource (`backups.cozystack.io/v1alpha1/plans`) serves as a complete reference implementation:
- **CustomColumnsOverride**: [Diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-8309b1db3362715b3d94a8b0beae7e95d3ccaf248d4f8702aaa12fba398da895R374-R380) in `static_refactored.go`
- **Factory**: [Diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-8309b1db3362715b3d94a8b0beae7e95d3ccaf248d4f8702aaa12fba398da895R1443-R1558) in `static_refactored.go`
- **Sidebar**: [Diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-be79027f7179e457a8f10e225bb921a197ffa390eb8f916d8d21379fadd54a56) in `sidebar.go`
- **Helper Function**: `createApplicationRefColumn()` in `static_helpers.go` ([diff](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513#diff-f17bcccc089cac3a8e965b13b9ab26e678d45bfc9a58d842399f218703e06a08R1026-R1046))
Review [this implementation](https://github.com/cozystack/cozystack/compare/1f0b5ff9ac0d9d8896af46f8a19501c8b728671d..88da2d1f642b6cf03873d368dfdc675de23f1513) for a complete working example.
## Troubleshooting
### Resource doesn't appear in sidebar
- Check that `keysAndTags["{plural}"]` is set correctly
- Verify the sidebar section is added to `menuItems`
- Ensure the sidebar ID is in `targetIDs`
### Clicking resource doesn't navigate to detail page
- Verify the CustomColumnsOverride href matches the Factory key
- Check that the Factory key is exactly `{lowercase-kind}-details`
- Ensure the Factory is added to the return list in `CreateAllFactories()`
### Detail page shows wrong sidebar
- Verify Factory `sidebarTags` matches `keysAndTags["{plural}"]`
- Check that the sidebar ID `stock-project-factory-{lowercase-kind}-details` exists
- Ensure all sidebars are updated (they share the same `keysAndTags`)
### Fields not displaying correctly
- Verify JSON paths are correct (use `.spec.fieldName` format)
- Check that `reqsJsonPath[0]` index is used for single resource views
- Ensure field names match the actual resource spec structure
## Additional Resources
- Kubernetes API Conventions: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md
- Dashboard API Types: `api/dashboard/v1alpha1/`
- Resource Types: `api/backups/v1alpha1/` (example)

View File

@@ -14,7 +14,7 @@ import (
)
// ensureBreadcrumb creates or updates a Breadcrumb resource for the given CRD
func (m *Manager) ensureBreadcrumb(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureBreadcrumb(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
group, version, kind := pickGVK(crd)
lowerKind := strings.ToLower(kind)
@@ -33,12 +33,12 @@ func (m *Manager) ensureBreadcrumb(ctx context.Context, crd *cozyv1alpha1.Applic
key := plural // e.g., "virtualmachines"
label := labelPlural
link := fmt.Sprintf("/openapi-ui/{cluster}/{namespace}/api-table/%s/%s/%s", strings.ToLower(group), strings.ToLower(version), plural)
link := fmt.Sprintf("/openapi-ui/{clusterName}/{namespace}/api-table/%s/%s/%s", strings.ToLower(group), strings.ToLower(version), plural)
// If this is a module, change the first breadcrumb item to "Tenant Modules"
if crd.Spec.Dashboard != nil && crd.Spec.Dashboard.Module {
key = "tenantmodules"
label = "Tenant Modules"
link = "/openapi-ui/{cluster}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules"
link = "/openapi-ui/{clusterName}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules"
}
items := []any{

View File

@@ -21,7 +21,7 @@ import (
//
// metadata.name: stock-namespace-<group>.<version>.<plural>
// spec.id: stock-namespace-/<group>/<version>/<plural>
func (m *Manager) ensureCustomColumnsOverride(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (controllerutil.OperationResult, error) {
func (m *Manager) ensureCustomColumnsOverride(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (controllerutil.OperationResult, error) {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)
// Details page segment uses lowercase kind, mirroring your example
@@ -34,6 +34,9 @@ func (m *Manager) ensureCustomColumnsOverride(ctx context.Context, crd *cozyv1al
obj.SetName(name)
href := fmt.Sprintf("/openapi-ui/{2}/{reqsJsonPath[0]['.metadata.namespace']['-']}/factory/%s/{reqsJsonPath[0]['.metadata.name']['-']}", detailsSegment)
if g == "apps.cozystack.io" && kind == "Tenant" && plural == "tenants" {
href = "/openapi-ui/{2}/{reqsJsonPath[0]['.status.namespace']['-']}/api-table/core.cozystack.io/v1alpha1/tenantmodules"
}
desired := map[string]any{
"spec": map[string]any{

View File

@@ -15,7 +15,7 @@ import (
)
// ensureCustomFormsOverride creates or updates a CustomFormsOverride resource for the given CRD
func (m *Manager) ensureCustomFormsOverride(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureCustomFormsOverride(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)
@@ -84,53 +84,6 @@ func (m *Manager) ensureCustomFormsOverride(ctx context.Context, crd *cozyv1alph
return err
}
// ensureCFOMapping updates the CFOMapping resource to include a mapping for the given CRD
func (m *Manager) ensureCFOMapping(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)
resourcePath := fmt.Sprintf("/%s/%s/%s", g, v, plural)
customizationID := fmt.Sprintf("default-%s", resourcePath)
obj := &dashv1alpha1.CFOMapping{}
obj.SetName("cfomapping")
_, err := controllerutil.CreateOrUpdate(ctx, m.Client, obj, func() error {
// Parse existing mappings
mappings := make(map[string]string)
if obj.Spec.JSON.Raw != nil {
var spec map[string]any
if err := json.Unmarshal(obj.Spec.JSON.Raw, &spec); err == nil {
if m, ok := spec["mappings"].(map[string]any); ok {
for k, val := range m {
if s, ok := val.(string); ok {
mappings[k] = s
}
}
}
}
}
// Add/update the mapping for this CRD
mappings[resourcePath] = customizationID
specData := map[string]any{
"mappings": mappings,
}
b, err := json.Marshal(specData)
if err != nil {
return err
}
newSpec := dashv1alpha1.ArbitrarySpec{JSON: apiextv1.JSON{Raw: b}}
if !compareArbitrarySpecs(obj.Spec, newSpec) {
obj.Spec = newSpec
}
return nil
})
return err
}
// buildMultilineStringSchema parses OpenAPI schema and creates schema with multilineString
// for all string fields inside spec that don't have enum
func buildMultilineStringSchema(openAPISchema string) (map[string]any, error) {

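Per the comment, every string field under spec that has no enum gets a multilineString marker. A hedged sketch of that recursive walk over a decoded schema; the property layout and the exact marker key are assumptions, only the rule itself is taken from the comment:

package main

import (
	"encoding/json"
	"fmt"
)

// markMultiline tags every string-typed property without an enum, following
// the rule described above (sketch only; the real schema shape and marker
// key may differ, and arrays/items are not handled here).
func markMultiline(node map[string]any) {
	if node["type"] == "string" {
		if _, hasEnum := node["enum"]; !hasEnum {
			node["customProps"] = map[string]any{"isMultilineString": true}
		}
		return
	}
	if props, ok := node["properties"].(map[string]any); ok {
		for _, child := range props {
			if m, ok := child.(map[string]any); ok {
				markMultiline(m)
			}
		}
	}
}

func main() {
	var schema map[string]any
	_ = json.Unmarshal([]byte(`{"type":"object","properties":{"notes":{"type":"string"},"mode":{"type":"string","enum":["a","b"]}}}`), &schema)
	markMultiline(schema) // only "notes" gets the marker; "mode" has an enum
	out, _ := json.Marshal(schema)
	fmt.Println(string(out))
}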
View File

@@ -16,7 +16,7 @@ import (
)
// ensureCustomFormsPrefill creates or updates a CustomFormsPrefill resource for the given CRD
func (m *Manager) ensureCustomFormsPrefill(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (reconcile.Result, error) {
func (m *Manager) ensureCustomFormsPrefill(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (reconcile.Result, error) {
logger := log.FromContext(ctx)
app := crd.Spec.Application

View File

@@ -15,7 +15,7 @@ import (
)
// ensureFactory creates or updates a Factory resource for the given CRD
func (m *Manager) ensureFactory(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureFactory(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)
@@ -47,7 +47,7 @@ func (m *Manager) ensureFactory(ctx context.Context, crd *cozyv1alpha1.Applicati
if prefix, ok := vncTabPrefix(kind); ok {
tabs = append(tabs, vncTab(prefix))
}
tabs = append(tabs, yamlTab(g, v, plural))
tabs = append(tabs, yamlTab(plural))
// Use unified factory creation
config := UnifiedResourceConfig{
@@ -160,11 +160,11 @@ func detailsTab(kind, endpoint, schemaJSON string, keysOrder [][]string) map[str
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "vpc-subnets-table",
"baseprefix": "/openapi-ui",
"cluster": "{2}",
"customizationId": "virtualprivatecloud-subnets",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/configmaps",
"id": "vpc-subnets-table",
"baseprefix": "/openapi-ui",
"clusterNamePartOfUrl": "{2}",
"customizationId": "virtualprivatecloud-subnets",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/configmaps",
"fieldSelector": map[string]any{
"metadata.name": "virtualprivatecloud-{6}-subnets",
},
@@ -174,31 +174,6 @@ func detailsTab(kind, endpoint, schemaJSON string, keysOrder [][]string) map[str
}),
)
}
if kind == "Tenant" {
leftColStack = append(leftColStack, antdFlexVertical("tenant-external-ip-count", 4, []any{
antdText("tenant-external-ip-count-label", true, "External IPs count", nil),
parsedText("tenant-external-ip-count-value", `{reqsJsonPath[0]['.status.externalIPsCount']['0']}`, nil),
}))
rightColStack = append(rightColStack,
antdFlexVertical("resource-quotas-block", 4, []any{
antdText("resource-quotas-label", true, "Resource Quotas", map[string]any{
"fontSize": float64(20),
"marginBottom": float64(12),
}),
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "resource-quotas-table",
"baseprefix": "/openapi-ui",
"cluster": "{2}",
"customizationId": "factory-resource-quotas",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{reqsJsonPath[0]['.status.namespace']}/resourcequotas",
"pathToItems": []any{`items`},
},
},
}),
)
}
return map[string]any{
"key": "details",
@@ -242,13 +217,13 @@ func detailsTab(kind, endpoint, schemaJSON string, keysOrder [][]string) map[str
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "conditions-table",
"fetchUrl": endpoint,
"cluster": "{2}",
"customizationId": "factory-status-conditions",
"baseprefix": "/openapi-ui",
"withoutControls": true,
"pathToItems": []any{"status", "conditions"},
"id": "conditions-table",
"fetchUrl": endpoint,
"clusterNamePartOfUrl": "{2}",
"customizationId": "factory-status-conditions",
"baseprefix": "/openapi-ui",
"withoutControls": true,
"pathToItems": []any{"status", "conditions"},
},
},
}),
@@ -264,12 +239,12 @@ func workloadsTab(kind string) map[string]any {
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "workloads-table",
"fetchUrl": "/api/clusters/{2}/k8s/apis/cozystack.io/v1alpha1/namespaces/{3}/workloadmonitors",
"cluster": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-v1alpha1.cozystack.io.workloadmonitors",
"pathToItems": []any{"items"},
"id": "workloads-table",
"fetchUrl": "/api/clusters/{2}/k8s/apis/cozystack.io/v1alpha1/namespaces/{3}/workloadmonitors",
"clusterNamePartOfUrl": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-v1alpha1.cozystack.io.workloadmonitors",
"pathToItems": []any{"items"},
"labelSelector": map[string]any{
"apps.cozystack.io/application.group": "apps.cozystack.io",
"apps.cozystack.io/application.kind": kind,
@@ -289,12 +264,12 @@ func servicesTab(kind string) map[string]any {
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "services-table",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/services",
"cluster": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-v1.services",
"pathToItems": []any{"items"},
"id": "services-table",
"fetchUrl": "/api/clusters/{2}/k8s/api/v1/namespaces/{3}/services",
"clusterNamePartOfUrl": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-v1.services",
"pathToItems": []any{"items"},
"labelSelector": map[string]any{
"apps.cozystack.io/application.group": "apps.cozystack.io",
"apps.cozystack.io/application.kind": kind,
@@ -315,12 +290,12 @@ func ingressesTab(kind string) map[string]any {
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "ingresses-table",
"fetchUrl": "/api/clusters/{2}/k8s/apis/networking.k8s.io/v1/namespaces/{3}/ingresses",
"cluster": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-networking.k8s.io.v1.ingresses",
"pathToItems": []any{"items"},
"id": "ingresses-table",
"fetchUrl": "/api/clusters/{2}/k8s/apis/networking.k8s.io/v1/namespaces/{3}/ingresses",
"clusterNamePartOfUrl": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-networking.k8s.io.v1.ingresses",
"pathToItems": []any{"items"},
"labelSelector": map[string]any{
"apps.cozystack.io/application.group": "apps.cozystack.io",
"apps.cozystack.io/application.kind": kind,
@@ -341,12 +316,12 @@ func secretsTab(kind string) map[string]any {
map[string]any{
"type": "EnrichedTable",
"data": map[string]any{
"id": "secrets-table",
"fetchUrl": "/api/clusters/{2}/k8s/apis/core.cozystack.io/v1alpha1/namespaces/{3}/tenantsecrets",
"cluster": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-v1alpha1.core.cozystack.io.tenantsecrets",
"pathToItems": []any{"items"},
"id": "secrets-table",
"fetchUrl": "/api/clusters/{2}/k8s/apis/core.cozystack.io/v1alpha1/namespaces/{3}/tenantsecrets",
"clusterNamePartOfUrl": "{2}",
"baseprefix": "/openapi-ui",
"customizationId": "factory-details-v1alpha1.core.cozystack.io.tenantsecrets",
"pathToItems": []any{"items"},
"labelSelector": map[string]any{
"apps.cozystack.io/application.group": "apps.cozystack.io",
"apps.cozystack.io/application.kind": kind,
@@ -358,7 +333,7 @@ func secretsTab(kind string) map[string]any {
}
}
func yamlTab(group, version, plural string) map[string]any {
func yamlTab(plural string) map[string]any {
return map[string]any{
"key": "yaml",
"label": "YAML",
@@ -369,10 +344,8 @@ func yamlTab(group, version, plural string) map[string]any {
"id": "yaml-editor",
"cluster": "{2}",
"isNameSpaced": true,
"type": "apis",
"apiGroup": group,
"apiVersion": version,
"plural": plural,
"type": "builtin",
"typeName": plural,
"prefillValuesRequestIndex": float64(0),
"readOnly": true,
"substractHeight": float64(400),
@@ -584,7 +557,7 @@ type factoryFlags struct {
// factoryFeatureFlags tries several conventional locations so you can evolve the API
// without breaking the controller. Defaults are false (hidden).
func factoryFeatureFlags(crd *cozyv1alpha1.ApplicationDefinition) factoryFlags {
func factoryFeatureFlags(crd *cozyv1alpha1.CozystackResourceDefinition) factoryFlags {
var f factoryFlags
f.Workloads = true

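factoryFeatureFlags defaults every tab to hidden except Workloads, then lets the resource definition override. A sketch of that default-then-override pattern; the field names beyond Workloads and the overrides source are hypothetical:

package main

import "fmt"

// factoryFlags mirrors the flags struct above; fields other than Workloads
// are hypothetical stand-ins.
type factoryFlags struct {
	Workloads bool
	Services  bool
	Ingresses bool
}

// featureFlags sketches the pattern from the comment above: defaults are
// false (hidden), Workloads defaults to true, explicit settings win.
func featureFlags(overrides map[string]bool) factoryFlags {
	f := factoryFlags{Workloads: true} // default-on, as in the diff above
	if v, ok := overrides["workloads"]; ok {
		f.Workloads = v
	}
	if v, ok := overrides["services"]; ok {
		f.Services = v
	}
	if v, ok := overrides["ingresses"]; ok {
		f.Ingresses = v
	}
	return f
}

func main() {
	fmt.Printf("%+v\n", featureFlags(map[string]bool{"services": true}))
	// {Workloads:true Services:true Ingresses:false}
}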
View File

@@ -23,7 +23,7 @@ type fieldInfo struct {
// pickGVK tries to read group/version/kind from the CRD. We prefer the "application" section,
// falling back to other likely fields if your schema differs.
func pickGVK(crd *cozyv1alpha1.ApplicationDefinition) (group, version, kind string) {
func pickGVK(crd *cozyv1alpha1.CozystackResourceDefinition) (group, version, kind string) {
// Best guess based on your examples:
if crd.Spec.Application.Kind != "" {
kind = crd.Spec.Application.Kind
@@ -41,7 +41,7 @@ func pickGVK(crd *cozyv1alpha1.ApplicationDefinition) (group, version, kind stri
}
// pickPlural prefers a field on the CRD if you have it; otherwise do a simple lowercase + "s".
func pickPlural(kind string, crd *cozyv1alpha1.ApplicationDefinition) string {
func pickPlural(kind string, crd *cozyv1alpha1.CozystackResourceDefinition) string {
// If you have crd.Spec.Application.Plural, prefer it. Example:
if crd.Spec.Application.Plural != "" {
return crd.Spec.Application.Plural

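pickPlural's fallback is the naive lowercase-plus-"s" named in the comment. A standalone sketch of just that fallback (the real method consults crd.Spec.Application.Plural first):

package main

import (
	"fmt"
	"strings"
)

// pluralize is the simple fallback described above, e.g.
// "VirtualMachine" -> "virtualmachines".
func pluralize(kind string) string {
	return strings.ToLower(kind) + "s"
}

func main() {
	fmt.Println(pluralize("VirtualMachine")) // virtualmachines
}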
View File

@@ -41,7 +41,7 @@ func AddToScheme(s *runtime.Scheme) error {
}
// Manager owns logic for creating/updating dashboard resources derived from CRDs.
// It's easy to extend: add new ensure* methods and wire them into EnsureForAppDef.
// It's easy to extend: add new ensure* methods and wire them into EnsureForCRD.

type Manager struct {
client.Client
Scheme *runtime.Scheme
@@ -56,7 +56,7 @@ func NewManager(c client.Client, scheme *runtime.Scheme) *Manager {
func (m *Manager) SetupWithManager(mgr ctrl.Manager) error {
if err := ctrl.NewControllerManagedBy(mgr).
Named("dashboard-reconciler").
For(&cozyv1alpha1.ApplicationDefinition{}).
For(&cozyv1alpha1.CozystackResourceDefinition{}).
Complete(m); err != nil {
return err
}
@@ -72,7 +72,7 @@ func (m *Manager) SetupWithManager(mgr ctrl.Manager) error {
func (m *Manager) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
l := log.FromContext(ctx)
crd := &cozyv1alpha1.ApplicationDefinition{}
crd := &cozyv1alpha1.CozystackResourceDefinition{}
err := m.Get(ctx, types.NamespacedName{Name: req.Name}, crd)
if err != nil {
@@ -85,10 +85,10 @@ func (m *Manager) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result,
return ctrl.Result{}, err
}
return m.EnsureForAppDef(ctx, crd)
return m.EnsureForCRD(ctx, crd)
}
// EnsureForAppDef is the single entry-point used by the controller.
// EnsureForCRD is the single entry-point used by the controller.
// Add more ensure* calls here as you implement support for other resources:
//
// - ensureBreadcrumb (implemented)
@@ -99,7 +99,7 @@ func (m *Manager) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result,
// - ensureMarketplacePanel (implemented)
// - ensureSidebar (implemented)
// - ensureTableUriMapping (implemented)
func (m *Manager) EnsureForAppDef(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (reconcile.Result, error) {
func (m *Manager) EnsureForCRD(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (reconcile.Result, error) {
// Early return if crd.Spec.Dashboard is nil to prevent oscillation
if crd.Spec.Dashboard == nil {
return reconcile.Result{}, nil
@@ -132,10 +132,6 @@ func (m *Manager) EnsureForAppDef(ctx context.Context, crd *cozyv1alpha1.Applica
return reconcile.Result{}, err
}
if err := m.ensureCFOMapping(ctx, crd); err != nil {
return reconcile.Result{}, err
}
if err := m.ensureSidebar(ctx, crd); err != nil {
return reconcile.Result{}, err
}
@@ -143,10 +139,6 @@ func (m *Manager) EnsureForAppDef(ctx context.Context, crd *cozyv1alpha1.Applica
if err := m.ensureFactory(ctx, crd); err != nil {
return reconcile.Result{}, err
}
if err := m.ensureNavigation(ctx, crd); err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
@@ -156,7 +148,7 @@ func (m *Manager) InitializeStaticResources(ctx context.Context) error {
}
// addDashboardLabels adds standard dashboard management labels to a resource
func (m *Manager) addDashboardLabels(obj client.Object, crd *cozyv1alpha1.ApplicationDefinition, resourceType string) {
func (m *Manager) addDashboardLabels(obj client.Object, crd *cozyv1alpha1.CozystackResourceDefinition, resourceType string) {
labels := obj.GetLabels()
if labels == nil {
labels = make(map[string]string)
@@ -205,7 +197,7 @@ func (m *Manager) getStaticResourceSelector() client.MatchingLabels {
// CleanupOrphanedResources removes dashboard resources that are no longer needed
// This should be called after cache warming to ensure all current resources are known
func (m *Manager) CleanupOrphanedResources(ctx context.Context) error {
var crdList cozyv1alpha1.ApplicationDefinitionList
var crdList cozyv1alpha1.CozystackResourceDefinitionList
if err := m.List(ctx, &crdList, &client.ListOptions{}); err != nil {
return err
}
@@ -236,7 +228,7 @@ func (m *Manager) CleanupOrphanedResources(ctx context.Context) error {
}
// buildExpectedResourceSet creates a map of expected resource names by type
func (m *Manager) buildExpectedResourceSet(crds []cozyv1alpha1.ApplicationDefinition) map[string]map[string]bool {
func (m *Manager) buildExpectedResourceSet(crds []cozyv1alpha1.CozystackResourceDefinition) map[string]map[string]bool {
expected := make(map[string]map[string]bool)
// Initialize maps for each resource type

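buildExpectedResourceSet returns a set of expected names keyed by resource type, which CleanupOrphanedResources then diffs against what actually exists. A sketch of that shape; the type and name strings here are hypothetical:

package main

import "fmt"

// expectedSet sketches the structure described above: per resource type,
// a set of names that are still expected to exist.
func expectedSet() map[string]map[string]bool {
	expected := make(map[string]map[string]bool)
	add := func(resourceType, name string) {
		if expected[resourceType] == nil {
			expected[resourceType] = make(map[string]bool)
		}
		expected[resourceType][name] = true
	}
	add("breadcrumb", "virtualmachines")
	add("factory", "virtualmachine-details")
	return expected
}

func main() {
	// Anything found in the cluster but absent from this set is an orphan.
	fmt.Println(expectedSet())
}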
View File

@@ -16,7 +16,7 @@ import (
)
// ensureMarketplacePanel creates or updates a MarketplacePanel resource for the given CRD
func (m *Manager) ensureMarketplacePanel(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) (reconcile.Result, error) {
func (m *Manager) ensureMarketplacePanel(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) (reconcile.Result, error) {
logger := log.FromContext(ctx)
mp := &dashv1alpha1.MarketplacePanel{}
@@ -74,7 +74,7 @@ func (m *Manager) ensureMarketplacePanel(ctx context.Context, crd *cozyv1alpha1.
"type": "nonCrd",
"apiGroup": "apps.cozystack.io",
"apiVersion": "v1alpha1",
"plural": app.Plural, // e.g., "buckets"
"typeName": app.Plural, // e.g., "buckets"
"disabled": false,
"hidden": false,
"tags": tags,

View File

@@ -1,69 +0,0 @@
package dashboard
import (
"context"
"encoding/json"
"fmt"
"strings"
dashv1alpha1 "github.com/cozystack/cozystack/api/dashboard/v1alpha1"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// ensureNavigation updates the Navigation resource to include a baseFactoriesMapping entry for the given CRD
func (m *Manager) ensureNavigation(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
g, v, kind := pickGVK(crd)
plural := pickPlural(kind, crd)
lowerKind := strings.ToLower(kind)
factoryKey := fmt.Sprintf("%s-details", lowerKind)
// All CRD resources are namespaced API resources
mappingKey := fmt.Sprintf("base-factory-namespaced-api-%s-%s-%s", g, v, plural)
obj := &dashv1alpha1.Navigation{}
obj.SetName("navigation")
_, err := controllerutil.CreateOrUpdate(ctx, m.Client, obj, func() error {
// Parse existing spec
spec := make(map[string]any)
if obj.Spec.JSON.Raw != nil {
if err := json.Unmarshal(obj.Spec.JSON.Raw, &spec); err != nil {
spec = make(map[string]any)
}
}
// Get or create baseFactoriesMapping
var mappings map[string]string
if existing, ok := spec["baseFactoriesMapping"].(map[string]any); ok {
mappings = make(map[string]string, len(existing))
for k, val := range existing {
if s, ok := val.(string); ok {
mappings[k] = s
}
}
} else {
mappings = make(map[string]string)
}
// Add/update the mapping for this CRD
mappings[mappingKey] = factoryKey
spec["baseFactoriesMapping"] = mappings
b, err := json.Marshal(spec)
if err != nil {
return err
}
newSpec := dashv1alpha1.ArbitrarySpec{JSON: apiextv1.JSON{Raw: b}}
if !compareArbitrarySpecs(obj.Spec, newSpec) {
obj.Spec = newSpec
}
return nil
})
return err
}

View File

@@ -22,18 +22,18 @@ import (
//
// Menu rules:
// - The first section is "Marketplace" with two hardcoded entries:
// - Marketplace (/openapi-ui/{cluster}/{namespace}/factory/marketplace)
// - Tenant Info (/openapi-ui/{cluster}/{namespace}/factory/info-details/info)
// - Marketplace (/openapi-ui/{clusterName}/{namespace}/factory/marketplace)
// - Tenant Info (/openapi-ui/{clusterName}/{namespace}/factory/info-details/info)
// - All other sections are built from CRDs where spec.dashboard != nil.
// - Categories are ordered strictly as:
// Marketplace, IaaS, PaaS, NaaS, <others A→Z>, Resources, Backups, Administration
// Marketplace, IaaS, PaaS, NaaS, <others A→Z>, Resources, Administration
// - Items within each category: sort by Weight (desc), then Label (A→Z).
func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.ApplicationDefinition) error {
func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.CozystackResourceDefinition) error {
// Build the full menu once.
// 1) Fetch all CRDs
var all []cozyv1alpha1.ApplicationDefinition
var crdList cozyv1alpha1.ApplicationDefinitionList
var all []cozyv1alpha1.CozystackResourceDefinition
var crdList cozyv1alpha1.CozystackResourceDefinitionList
if err := m.List(ctx, &crdList, &client.ListOptions{}); err != nil {
return err
}
@@ -91,7 +91,7 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
// Weight (default 0)
weight := def.Spec.Dashboard.Weight
link := fmt.Sprintf("/openapi-ui/{cluster}/{namespace}/api-table/%s/%s/%s", g, v, plural)
link := fmt.Sprintf("/openapi-ui/{clusterName}/{namespace}/api-table/%s/%s/%s", g, v, plural)
categories[cat] = append(categories[cat], item{
Key: plural,
@@ -111,17 +111,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
keysAndTags["services"] = []any{"service-sidebar"}
keysAndTags["secrets"] = []any{"secret-sidebar"}
keysAndTags["ingresses"] = []any{"ingress-sidebar"}
// Add sidebar for v1/services type loadbalancer
keysAndTags["loadbalancer-services"] = []any{"external-ips-sidebar"}
// Add sidebar for backups.cozystack.io Plan resource
keysAndTags["plans"] = []any{"plan-sidebar"}
// Add sidebar for backups.cozystack.io BackupJob resource
keysAndTags["backupjobs"] = []any{"backupjob-sidebar"}
// Add sidebar for backups.cozystack.io Backup resource
keysAndTags["backups"] = []any{"backup-sidebar"}
// 3) Sort items within each category by Weight (desc), then Label (A→Z)
for cat := range categories {
@@ -134,10 +123,10 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
}
// 4) Order categories strictly:
// Marketplace (hardcoded), IaaS, PaaS, NaaS, <others A→Z>, Resources, Backups (hardcoded), Administration (hardcoded)
// Marketplace (hardcoded), IaaS, PaaS, NaaS, <others A→Z>, Resources, Administration
orderedCats := orderCategoryLabels(categories)
// 5) Build menuItems (hardcode "Marketplace"; then dynamic categories; then hardcode "Backups" and "Administration")
// 5) Build menuItems (hardcode "Marketplace"; then dynamic categories; then hardcode "Administration")
menuItems := []any{
map[string]any{
"key": "marketplace",
@@ -146,15 +135,15 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
map[string]any{
"key": "marketplace",
"label": "Marketplace",
"link": "/openapi-ui/{cluster}/{namespace}/factory/marketplace",
"link": "/openapi-ui/{clusterName}/{namespace}/factory/marketplace",
},
},
},
}
for _, cat := range orderedCats {
// Skip "Marketplace", "Backups", and "Administration" here since they're hardcoded
if strings.EqualFold(cat, "Marketplace") || strings.EqualFold(cat, "Backups") || strings.EqualFold(cat, "Administration") {
// Skip "Marketplace" and "Administration" here since they're hardcoded
if strings.EqualFold(cat, "Marketplace") || strings.EqualFold(cat, "Administration") {
continue
}
children := []any{}
@@ -174,29 +163,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
}
}
// Add hardcoded Backups section
menuItems = append(menuItems, map[string]any{
"key": "backups",
"label": "Backups",
"children": []any{
map[string]any{
"key": "plans",
"label": "Plans",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/plans",
},
map[string]any{
"key": "backupjobs",
"label": "BackupJobs",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backupjobs",
},
map[string]any{
"key": "backups",
"label": "Backups",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/backups.cozystack.io/v1alpha1/backups",
},
},
})
// Add hardcoded Administration section
menuItems = append(menuItems, map[string]any{
"key": "administration",
@@ -205,22 +171,17 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
map[string]any{
"key": "info",
"label": "Info",
"link": "/openapi-ui/{cluster}/{namespace}/factory/info-details/info",
"link": "/openapi-ui/{clusterName}/{namespace}/factory/info-details/info",
},
map[string]any{
"key": "modules",
"label": "Modules",
"link": "/openapi-ui/{cluster}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules",
},
map[string]any{
"key": "loadbalancer-services",
"label": "External IPs",
"link": "/openapi-ui/{clusterName}/{namespace}/factory/external-ips",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/core.cozystack.io/v1alpha1/tenantmodules",
},
map[string]any{
"key": "tenants",
"label": "Tenants",
"link": "/openapi-ui/{cluster}/{namespace}/api-table/apps.cozystack.io/v1alpha1/tenants",
"link": "/openapi-ui/{clusterName}/{namespace}/api-table/apps.cozystack.io/v1alpha1/tenants",
},
},
})
@@ -240,10 +201,6 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
"stock-project-factory-kube-service-details",
"stock-project-factory-kube-secret-details",
"stock-project-factory-kube-ingress-details",
"stock-project-factory-plan-details",
"stock-project-factory-backupjob-details",
"stock-project-factory-backup-details",
"stock-project-factory-external-ips",
"stock-project-api-form",
"stock-project-api-table",
"stock-project-builtin-form",
@@ -271,7 +228,7 @@ func (m *Manager) ensureSidebar(ctx context.Context, crd *cozyv1alpha1.Applicati
// upsertMultipleSidebars creates/updates several Sidebar resources with the same menu spec.
func (m *Manager) upsertMultipleSidebars(
ctx context.Context,
crd *cozyv1alpha1.ApplicationDefinition,
crd *cozyv1alpha1.CozystackResourceDefinition,
ids []string,
keysAndTags map[string]any,
menuItems []any,
@@ -378,7 +335,7 @@ func orderCategoryLabels[T any](cats map[string][]T) []string {
}
// safeCategory returns spec.dashboard.category or "Resources" if not set.
func safeCategory(def *cozyv1alpha1.ApplicationDefinition) string {
func safeCategory(def *cozyv1alpha1.CozystackResourceDefinition) string {
if def == nil || def.Spec.Dashboard == nil {
return "Resources"
}
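orderCategoryLabels implements the ordering spelled out in the menu-rules comment: pinned categories first, then the rest A→Z, with Resources last (Marketplace and Administration are hardcoded elsewhere and skipped). A non-generic sketch of that rule:

package main

import (
	"fmt"
	"sort"
)

// orderCategories sketches the rule from the comment above; the real helper
// is generic over the item type, this version is specialized for brevity.
func orderCategories(cats map[string][]string) []string {
	pinnedHead := []string{"IaaS", "PaaS", "NaaS"}
	pinned := map[string]bool{"IaaS": true, "PaaS": true, "NaaS": true, "Resources": true}

	var out, others []string
	for _, c := range pinnedHead {
		if _, ok := cats[c]; ok {
			out = append(out, c)
		}
	}
	for c := range cats {
		if !pinned[c] {
			others = append(others, c)
		}
	}
	sort.Strings(others) // <others A→Z>
	out = append(out, others...)
	if _, ok := cats["Resources"]; ok {
		out = append(out, "Resources") // always last among dynamic categories
	}
	return out
}

func main() {
	cats := map[string][]string{"PaaS": nil, "Storage": nil, "IaaS": nil, "Resources": nil}
	fmt.Println(orderCategories(cats)) // [IaaS PaaS Storage Resources]
}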

Some files were not shown because too many files have changed in this diff.