Compare commits


1 commit

Author: Andrei Kvapil
SHA1: fe70003c5e
Message: Add openshft-console
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
Date: 2024-12-18 15:41:49 +01:00
971 changed files with 27134 additions and 103697 deletions

.github/CODEOWNERS vendored

@@ -1 +1 @@
* @kvaps @lllamnyp
* @kvaps


@@ -1,12 +1,7 @@
name: Pre-Commit Checks
on:
push:
branches:
- main
pull_request:
paths-ignore:
- '**.md'
on: [push, pull_request]
jobs:
pre-commit:
runs-on: ubuntu-22.04


@@ -1,96 +0,0 @@
name: Releasing PR
on:
pull_request:
types: [labeled, opened, synchronize, reopened, closed]
jobs:
verify:
name: Test Release
runs-on: [self-hosted]
permissions:
contents: read
packages: write
if: |
contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
contains(github.event.pull_request.labels.*.name, 'release') &&
github.event.action != 'closed'
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: Run tests
run: make test
finalize:
name: Finalize Release
runs-on: [self-hosted]
permissions:
contents: write
if: |
github.event.pull_request.merged == true &&
contains(github.event.pull_request.labels.*.name, 'release')
steps:
- name: Extract tag from branch name
id: get_tag
uses: actions/github-script@v7
with:
script: |
const branch = context.payload.pull_request.head.ref;
const match = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
if (!match) {
core.setFailed(`Branch '${branch}' does not match expected format 'release-X.Y.Z[-suffix]'`);
} else {
const tag = match[1];
core.setOutput('tag', tag);
console.log(`✅ Extracted tag: ${tag}`);
}
- name: Checkout repo
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Create tag on merged commit
run: |
git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
git push origin ${{ steps.get_tag.outputs.tag }}
- name: Publish draft release
uses: actions/github-script@v7
with:
script: |
const tag = '${{ steps.get_tag.outputs.tag }}';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
const release = releases.data.find(r => r.tag_name === tag && r.draft);
if (!release) {
throw new Error(`Draft release with tag ${tag} not found`);
}
await github.rest.repos.updateRelease({
owner: context.repo.owner,
repo: context.repo.repo,
release_id: release.id,
draft: false
});
console.log(`✅ Published release for ${tag}`);
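The "Extract tag from branch name" step above derives the release tag from the pull-request branch name with a regular expression. A minimal Go sketch of the same pattern (the branch names below are invented for illustration, not taken from the workflow):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same pattern the workflow uses: release-X.Y.Z with an optional suffix.
    re := regexp.MustCompile(`^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$`)
    for _, branch := range []string{"release-0.42.0", "release-1.0.0-rc.1", "feature/foo"} {
        if m := re.FindStringSubmatch(branch); m != nil {
            fmt.Printf("%s -> tag %s\n", branch, m[1])
        } else {
            fmt.Printf("%s -> no match, the workflow step would fail\n", branch)
        }
    }
}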


@@ -1,39 +0,0 @@
name: Pull Request
on:
pull_request:
types: [labeled, opened, synchronize, reopened]
jobs:
e2e:
name: Build and Test
runs-on: [self-hosted]
permissions:
contents: read
packages: write
if: |
contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
!contains(github.event.pull_request.labels.*.name, 'release')
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: make build
run: |
make build
- name: make test
run: |
make test


@@ -1,162 +0,0 @@
name: Versioned Tag
on:
push:
tags:
- 'v*.*.*'
jobs:
prepare-release:
name: Prepare Release
runs-on: [self-hosted]
permissions:
contents: write
packages: write
pull-requests: write
steps:
- name: Check if release already exists
id: check_release
uses: actions/github-script@v7
with:
script: |
const tag = context.ref.replace('refs/tags/', '');
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
if (existing) {
core.setOutput('skip', 'true');
} else {
core.setOutput('skip', 'false');
}
- name: Skip if release already exists
if: steps.check_release.outputs.skip == 'true'
run: echo "Release already exists, skipping workflow."
- name: Checkout code
if: steps.check_release.outputs.skip == 'false'
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Login to GitHub Container Registry
if: steps.check_release.outputs.skip == 'false'
uses: docker/login-action@v3
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: Build
if: steps.check_release.outputs.skip == 'false'
run: make build
- name: Commit release artifacts
if: steps.check_release.outputs.skip == 'false'
env:
GIT_AUTHOR_NAME: ${{ github.actor }}
GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
run: |
git config user.name "$GIT_AUTHOR_NAME"
git config user.email "$GIT_AUTHOR_EMAIL"
git add .
git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
- name: Create release branch
if: steps.check_release.outputs.skip == 'false'
run: |
BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
git branch -f "$BRANCH_NAME"
git push origin "$BRANCH_NAME" --force
- name: Create pull request if not exists
if: steps.check_release.outputs.skip == 'false'
uses: actions/github-script@v7
with:
script: |
const version = context.ref.replace('refs/tags/v', '');
const branch = `release-${version}`;
const base = 'main';
const prs = await github.rest.pulls.list({
owner: context.repo.owner,
repo: context.repo.repo,
head: `${context.repo.owner}:${branch}`,
base
});
if (prs.data.length === 0) {
const newPr = await github.rest.pulls.create({
owner: context.repo.owner,
repo: context.repo.repo,
head: branch,
base: base,
title: `Release v${version}`,
body:
`This PR prepares the release \`v${version}\`.\n` +
`(Please merge it before releasing draft)`,
draft: false
});
console.log(`Created pull request #${newPr.data.number} from ${branch} to ${base}`);
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: newPr.data.number,
labels: ['release']
});
} else {
console.log(`Pull request already exists from ${branch} to ${base}`);
}
- name: Create or reuse draft release
if: steps.check_release.outputs.skip == 'false'
id: create_release
uses: actions/github-script@v7
with:
script: |
const tag = context.ref.replace('refs/tags/', '');
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
let release = releases.data.find(r => r.tag_name === tag);
if (!release) {
release = await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: tag,
name: `${tag}`,
draft: true,
prerelease: false
});
}
core.setOutput('upload_url', release.upload_url);
- name: Build assets
if: steps.check_release.outputs.skip == 'false'
run: make assets
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
if: steps.check_release.outputs.skip == 'false'
run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Delete pushed tag
if: steps.check_release.outputs.skip == 'false'
run: |
git push --delete origin ${GITHUB_REF#refs/tags/}
- name: Run tests
run: make test
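The "Create release branch" step above turns the pushed tag ref into a branch name with shell parameter expansion (${GITHUB_REF#refs/tags/v}). A rough Go equivalent, using an assumed example ref, for illustration only:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // For a tag push, GITHUB_REF looks like "refs/tags/v1.2.3" (example value).
    ref := "refs/tags/v1.2.3"
    version := strings.TrimPrefix(ref, "refs/tags/v") // "1.2.3", mirrors ${GITHUB_REF#refs/tags/v}
    branch := "release-" + version                    // "release-1.2.3"
    fmt.Println(branch)
}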


@@ -13,8 +13,8 @@ but it means a lot to us.
To add your organization to this list, you can either:
- [open a pull request](https://github.com/cozystack/cozystack/pulls) to directly update this file, or
- [edit this file](https://github.com/cozystack/cozystack/blob/main/ADOPTERS.md) directly in GitHub
- [open a pull request](https://github.com/aenix-io/cozystack/pulls) to directly update this file, or
- [edit this file](https://github.com/aenix-io/cozystack/blob/main/ADOPTERS.md) directly in GitHub
Feel free to ask in the Slack chat if you any questions and/or require
assistance with updating this list.


@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas o
* Problems found while setting up the development environment
* Gaps in our documentation
* Bugs in our GitHub actions
* Bugs in our Github actions
First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).
The guidelines below are a starting point. We don't want to limit your
creativity, passion, and initiative. If you think there's a better way, please
feel free to bring it up in a GitHub discussion, or open a pull request. We're
feel free to bring it up in a Github discussion, or open a pull request. We're
certain there are always better ways to do things, we just need to start some
constructive dialogue!
@@ -23,9 +23,9 @@ We welcome many types of contributions including:
* New features
* Builds, CI/CD
* Bug fixes
* [Documentation](https://GitHub.com/cozystack/cozystack-website/tree/main)
* [Documentation](https://github.com/aenix-io/cozystack-website/tree/main)
* Issue Triage
* Answering questions on Slack or GitHub Discussions
* Answering questions on Slack or Github Discussions
* Web design
* Communications / Social Media / Blog Posts
* Events participation
@@ -34,7 +34,7 @@ We welcome many types of contributions including:
## Ask for Help
The best way to reach us with a question when contributing is to drop a line in
our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.
our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.
## Raising Issues


@@ -1,91 +0,0 @@
# Cozystack Governance
This document defines the governance structure of the Cozystack community, outlining how members collaborate to achieve shared goals.
## Overview
**Cozystack**, a Cloud Native Computing Foundation (CNCF) project, is committed
to building an open, inclusive, productive, and self-governing open source
community focused on building a high-quality open source PaaS and framework for building clouds.
## Code Repositories
The following code repositories are governed by the Cozystack community and
maintained under the `cozystack` namespace:
* **[Cozystack](https://github.com/cozystack/cozystack):** Main Cozystack codebase
* **[website](https://github.com/cozystack/website):** Cozystack website and documentation sources
* **[Talm](https://github.com/cozystack/talm):** Tool for managing Talos Linux the GitOps way
* **[cozy-proxy](https://github.com/cozystack/cozy-proxy):** A simple kube-proxy addon for 1:1 NAT services in Kubernetes with NFT backend
* **[cozystack-telemetry-server](https://github.com/cozystack/cozystack-telemetry-server):** Cozystack telemetry
* **[talos-bootstrap](https://github.com/cozystack/talos-bootstrap):** An interactive Talos Linux installer
* **[talos-meta-tool](https://github.com/cozystack/talos-meta-tool):** Tool for writing network metadata into META partition
## Community Roles
* **Users:** Members that engage with the Cozystack community via any medium, including Slack, Telegram, GitHub, and mailing lists.
* **Contributors:** Members contributing to the projects by contributing and reviewing code, writing documentation,
responding to issues, participating in proposal discussions, and so on.
* **Directors:** Non-technical project leaders.
* **Maintainers**: Technical project leaders.
## Contributors
Cozystack is for everyone. Anyone can become a Cozystack contributor simply by
contributing to the project, whether through code, documentation, blog posts,
community management, or other means.
As with all Cozystack community members, contributors are expected to follow the
[Cozystack Code of Conduct](https://github.com/cozystack/cozystack/blob/main/CODE_OF_CONDUCT.md).
All contributions to Cozystack code, documentation, or other components in the
Cozystack GitHub organisation must follow the
[contributing guidelines](https://github.com/cozystack/cozystack/blob/main/CONTRIBUTING.md).
Whether these contributions are merged into the project is the prerogative of the maintainers.
## Directors
Directors are responsible for non-technical leadership functions within the project.
This includes representing Cozystack and its maintainers to the community, to the press,
and to the outside world; interfacing with CNCF and other governance entities;
and participating in project decision-making processes when appropriate.
Directors are elected by a majority vote of the maintainers.
## Maintainers
Maintainers have the right to merge code into the project.
Anyone can become a Cozystack maintainer (see "Becoming a maintainer" below).
### Expectations
Cozystack maintainers are expected to:
* Review pull requests, triage issues, and fix bugs in their areas of
expertise, ensuring that all changes go through the project's code review
and integration processes.
* Monitor cncf-cozystack-* emails, the Cozystack Slack channels in Kubernetes
and CNCF Slack workspaces, Telegram groups, and help out when possible.
* Rapidly respond to any time-sensitive security release processes.
* Attend Cozystack community meetings.
If a maintainer is no longer interested in or cannot perform the duties
listed above, they should move themselves to emeritus status.
If necessary, this can also occur through the decision-making process outlined below.
### Becoming a Maintainer
Anyone can become a Cozystack maintainer. Maintainers should be extremely
proficient in cloud native technologies and/or Go; have relevant domain expertise;
have the time and ability to meet the maintainer's expectations above;
and demonstrate the ability to work with the existing maintainers and project processes.
To become a maintainer, start by expressing interest to existing maintainers.
Existing maintainers will then ask you to demonstrate the qualifications above
by contributing PRs, doing code reviews, and other such tasks under their guidance.
After several months of working together, maintainers will decide whether to grant maintainer status.
## Project Decision-making Process
Ideally, all project decisions are resolved by consensus of maintainers and directors.
If this is not possible, a vote will be called.
The voting process is a simple majority in which each maintainer and director receives one vote.


@@ -1,12 +1,7 @@
# The Cozystack Maintainers
| Maintainer | GitHub Username | Company | Responsibility |
| ---------- | --------------- | ------- | --------------------------------- |
| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix | Core Maintainer |
| George Gaál | [@gecube](https://github.com/gecube) | Ænix | DevOps Practices in Platform, Developers Advocate |
| Kingdon Barrett | [@kingdonb](https://github.com/kingdonb) | Urmanac | FluxCD and flux-operator |
| Timofei Larkin | [@lllamnyp](https://github.com/lllamnyp) | 3commas | Etcd-operator Lead |
| Artem Bortnikov | [@aobort](https://github.com/aobort) | Timescale | Etcd-operator Lead |
| Andrei Gumilev | [@chumkaska](https://github.com/chumkaska) | Ænix | Platform Documentation |
| Timur Tukaev | [@tym83](https://github.com/tym83) | Ænix | Cozystack Website, Marketing, Community Management |
| Kirill Klinchenkov | [@klinch0](https://github.com/klinch0) | Ænix | Core Maintainer |
| Maintainer | GitHub Username | Company |
| ---------- | --------------- | ------- |
| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix |
| George Gaál | [@gecube](https://github.com/gecube) | Ænix |
| Eduard Generalov | [@egeneralov](https://github.com/egeneralov) | Ænix |


@@ -1,24 +1,14 @@
.PHONY: manifests repos assets
build-deps:
@command -V find docker skopeo jq gh helm > /dev/null
@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
@tar --version | grep -q GNU || (echo "GNU tar is required" && exit 1)
@sed --version | grep -q GNU || (echo "GNU sed is required" && exit 1)
@awk --version | grep -q GNU || (echo "GNU awk is required" && exit 1)
build: build-deps
build:
make -C packages/apps/http-cache image
make -C packages/apps/postgres image
make -C packages/apps/mysql image
make -C packages/apps/clickhouse image
make -C packages/apps/kubernetes image
make -C packages/extra/monitoring image
make -C packages/system/cozystack-api image
make -C packages/system/cozystack-controller image
make -C packages/system/cilium image
make -C packages/system/kubeovn image
make -C packages/system/kubeovn-webhook image
make -C packages/system/dashboard image
make -C packages/system/kamaji image
make -C packages/system/bucket image
@@ -26,6 +16,10 @@ build: build-deps
make -C packages/core/installer image
make manifests
manifests:
(cd packages/core/installer/; helm template -n cozy-installer installer .) > manifests/cozystack-installer.yaml
sed -i 's|@sha256:[^"]\+||' manifests/cozystack-installer.yaml
repos:
rm -rf _out
make -C packages/apps check-version-map
@@ -36,21 +30,10 @@ repos:
mkdir -p _out/logos
cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/
manifests:
mkdir -p _out/assets
(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
assets:
make -C packages/core/installer/ assets
test:
make -C packages/core/testing apply
make -C packages/core/testing test
#make -C packages/core/testing test-applications
generate:
hack/update-codegen.sh
upload_assets: manifests
hack/upload-assets.sh
make -C packages/core/testing test-applications


@@ -2,31 +2,30 @@
![Cozystack](img/cozystack-logo-white.svg#gh-dark-mode-only)
[![Open Source](https://img.shields.io/badge/Open-Source-brightgreen)](https://opensource.org/)
[![Apache-2.0 License](https://img.shields.io/github/license/cozystack/cozystack)](https://opensource.org/licenses/)
[![Support](https://img.shields.io/badge/$-support-12a0df.svg?style=flat)](https://cozystack.io/support/)
[![Active](http://img.shields.io/badge/Status-Active-green.svg)](https://github.com/cozystack/cozystack)
[![GitHub Release](https://img.shields.io/github/release/cozystack/cozystack.svg?style=flat)](https://github.com/cozystack/cozystack/releases/latest)
[![GitHub Commit](https://img.shields.io/github/commit-activity/y/cozystack/cozystack)](https://github.com/cozystack/cozystack/graphs/contributors)
[![Apache-2.0 License](https://img.shields.io/github/license/aenix-io/cozystack)](https://opensource.org/licenses/)
[![Support](https://img.shields.io/badge/$-support-12a0df.svg?style=flat)](https://aenix.io/contact-us/#meet)
[![Active](http://img.shields.io/badge/Status-Active-green.svg)](https://aenix.io/cozystack/)
[![GitHub Release](https://img.shields.io/github/release/aenix-io/cozystack.svg?style=flat)](https://github.com/aenix-io/cozystack)
[![GitHub Commit](https://img.shields.io/github/commit-activity/y/aenix-io/cozystack)](https://github.com/aenix-io/cozystack)
# Cozystack
**Cozystack** is a free PaaS platform and framework for building clouds.
With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
Use Cozystack to build your own cloud or provide a cost-effective development environment.
You can use Cozystack to build your own cloud or to provide a cost-effective development environments.
## Use-Cases
* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
You can use Cozystack as a backend for a public cloud
* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
You can use Cozystack as backend for a public cloud
* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach
* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach
* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
You can use Cozystack as a Kubernetes distribution for Bare Metal
* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
You can use Cozystack as Kubernetes distribution for Bare Metal
## Screenshot
@@ -34,32 +33,32 @@ You can use Cozystack as a Kubernetes distribution for Bare Metal
## Documentation
The documentation is located on the [cozystack.io](https://cozystack.io) website.
The documentation is located on official [cozystack.io](https://cozystack.io) website.
Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.
Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.
If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.
## Versioning
Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
A full list of the available releases is available in the GitHub repository's [Release](https://github.com/cozystack/cozystack/releases) section.
A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.
- [Roadmap](https://cozystack.io/docs/roadmap/)
- [Roadmap](https://github.com/orgs/aenix-io/projects/2)
## Contributions
Contributions are highly appreciated and very welcomed!
In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.
In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/aenix-io/cozystack/issues) section.
In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.
You can express your intention to on the fix on your own.
You can express your intention in working on the fix on your own.
Commits are used to generate the changelog, and their author will be referenced in it.
If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/aenix-io/cozystack/discussions/categories/feature-requests).
You are welcome to join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
## License
@@ -68,4 +67,8 @@ The code is provided as-is with no warranties.
## Commercial Support
A list of companies providing commercial support for this project can be found on [official site](https://cozystack.io/support/).
[**Ænix**](https://aenix.io) offers enterprise-grade support, available 24/7.
We provide all types of assistance, including consultations, development of missing features, design, assistance with installation, and integration.
[Contact us](https://aenix.io/contact/)


@@ -1,4 +1,4 @@
API rule violation: list_type_missing,github.com/cozystack/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
API rule violation: list_type_missing,github.com/aenix.io/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource


@@ -1,36 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the v1alpha1 API group.
// +kubebuilder:object:generate=true
// +groupName=cozystack.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "cozystack.io", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)


@@ -1,70 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// WorkloadStatus defines the observed state of Workload
type WorkloadStatus struct {
// Kind represents the type of workload (redis, postgres, etc.)
// +required
Kind string `json:"kind"`
// Type represents the specific role of the workload (redis, sentinel, etc.)
// If not specified, defaults to Kind
// +optional
Type string `json:"type,omitempty"`
// Resources specifies the compute resources allocated to this workload
// +required
Resources map[string]resource.Quantity `json:"resources"`
// Operational indicates if all pods of the workload are ready
// +optional
Operational bool `json:"operational"`
}
// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".status.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".status.type"
// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".status.resources.cpu"
// +kubebuilder:printcolumn:name="Memory",type="string",JSONPath=".status.resources.memory"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=`.status.operational`
// Workload is the Schema for the workloads API
type Workload struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Status WorkloadStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// WorkloadList contains a list of Workload
type WorkloadList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Workload `json:"items"`
}
func init() {
SchemeBuilder.Register(&Workload{}, &WorkloadList{})
}
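As a hedged sketch of how the Workload type above might be populated by a controller (the object name, namespace, and resource quantities are invented; the import path follows the controller sources shown further down):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

func main() {
    // A Workload as a controller could report it; all values are illustrative only.
    w := cozyv1alpha1.Workload{
        ObjectMeta: metav1.ObjectMeta{Name: "postgres-example-0", Namespace: "tenant-root"},
        Status: cozyv1alpha1.WorkloadStatus{
            Kind: "postgres",
            Type: "postgres",
            Resources: map[string]resource.Quantity{
                "cpu":    resource.MustParse("500m"),
                "memory": resource.MustParse("1Gi"),
            },
            Operational: true,
        },
    }
    cpu := w.Status.Resources["cpu"]
    mem := w.Status.Resources["memory"]
    fmt.Printf("%s: cpu=%s memory=%s operational=%t\n", w.Name, cpu.String(), mem.String(), w.Status.Operational)
}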


@@ -1,91 +0,0 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// WorkloadMonitorSpec defines the desired state of WorkloadMonitor
type WorkloadMonitorSpec struct {
// Selector is a label selector to find workloads to monitor
// +required
Selector map[string]string `json:"selector"`
// Kind specifies the kind of the workload
// +optional
Kind string `json:"kind,omitempty"`
// Type specifies the type of the workload
// +optional
Type string `json:"type,omitempty"`
// Version specifies the version of the workload
// +optional
Version string `json:"version,omitempty"`
// MinReplicas specifies the minimum number of replicas that should be available
// +kubebuilder:validation:Minimum=0
// +optional
MinReplicas *int32 `json:"minReplicas,omitempty"`
// Replicas is the desired number of replicas
// If not specified, will use observedReplicas as the target
// +kubebuilder:validation:Minimum=0
// +optional
Replicas *int32 `json:"replicas,omitempty"`
}
// WorkloadMonitorStatus defines the observed state of WorkloadMonitor
type WorkloadMonitorStatus struct {
// Operational indicates if the workload meets all operational requirements
// +optional
Operational *bool `json:"operational,omitempty"`
// AvailableReplicas is the number of ready replicas
// +optional
AvailableReplicas int32 `json:"availableReplicas"`
// ObservedReplicas is the total number of pods observed
// +optional
ObservedReplicas int32 `json:"observedReplicas"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".spec.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas"
// +kubebuilder:printcolumn:name="MinReplicas",type="integer",JSONPath=".spec.minReplicas"
// +kubebuilder:printcolumn:name="Available",type="integer",JSONPath=".status.availableReplicas"
// +kubebuilder:printcolumn:name="Observed",type="integer",JSONPath=".status.observedReplicas"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=".status.operational"
// WorkloadMonitor is the Schema for the workloadmonitors API
type WorkloadMonitor struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec WorkloadMonitorSpec `json:"spec,omitempty"`
Status WorkloadMonitorStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// WorkloadMonitorList contains a list of WorkloadMonitor
type WorkloadMonitorList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []WorkloadMonitor `json:"items"`
}
func init() {
SchemeBuilder.Register(&WorkloadMonitor{}, &WorkloadMonitorList{})
}
// GetSelector returns the label selector from metadata
func (w *WorkloadMonitor) GetSelector() map[string]string {
return w.Spec.Selector
}
// Selector specifies the label selector for workloads
type Selector map[string]string
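A sketch of how a reconciler might use the WorkloadMonitor selector with a controller-runtime client to find matching Workload objects (the package and function names are hypothetical, not part of this commit):

package example

import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/client"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// listMonitoredWorkloads lists Workload objects in the monitor's namespace
// whose labels match the monitor's selector.
func listMonitoredWorkloads(ctx context.Context, c client.Client, m *cozyv1alpha1.WorkloadMonitor) ([]cozyv1alpha1.Workload, error) {
    var workloads cozyv1alpha1.WorkloadList
    err := c.List(ctx, &workloads,
        client.InNamespace(m.Namespace),
        client.MatchingLabels(m.GetSelector()))
    return workloads.Items, err
}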


@@ -1,238 +0,0 @@
//go:build !ignore_autogenerated
/*
Copyright 2025 The Cozystack Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/api/resource"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Selector) DeepCopyInto(out *Selector) {
{
in := &in
*out = make(Selector, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector.
func (in Selector) DeepCopy() Selector {
if in == nil {
return nil
}
out := new(Selector)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Workload) DeepCopyInto(out *Workload) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload.
func (in *Workload) DeepCopy() *Workload {
if in == nil {
return nil
}
out := new(Workload)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Workload) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadList) DeepCopyInto(out *WorkloadList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Workload, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadList.
func (in *WorkloadList) DeepCopy() *WorkloadList {
if in == nil {
return nil
}
out := new(WorkloadList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitor) DeepCopyInto(out *WorkloadMonitor) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitor.
func (in *WorkloadMonitor) DeepCopy() *WorkloadMonitor {
if in == nil {
return nil
}
out := new(WorkloadMonitor)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitor) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorList) DeepCopyInto(out *WorkloadMonitorList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]WorkloadMonitor, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorList.
func (in *WorkloadMonitorList) DeepCopy() *WorkloadMonitorList {
if in == nil {
return nil
}
out := new(WorkloadMonitorList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitorList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorSpec) DeepCopyInto(out *WorkloadMonitorSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.MinReplicas != nil {
in, out := &in.MinReplicas, &out.MinReplicas
*out = new(int32)
**out = **in
}
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorSpec.
func (in *WorkloadMonitorSpec) DeepCopy() *WorkloadMonitorSpec {
if in == nil {
return nil
}
out := new(WorkloadMonitorSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorStatus) DeepCopyInto(out *WorkloadMonitorStatus) {
*out = *in
if in.Operational != nil {
in, out := &in.Operational, &out.Operational
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorStatus.
func (in *WorkloadMonitorStatus) DeepCopy() *WorkloadMonitorStatus {
if in == nil {
return nil
}
out := new(WorkloadMonitorStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadStatus) DeepCopyInto(out *WorkloadStatus) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make(map[string]resource.Quantity, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadStatus.
func (in *WorkloadStatus) DeepCopy() *WorkloadStatus {
if in == nil {
return nil
}
out := new(WorkloadStatus)
in.DeepCopyInto(out)
return out
}


@@ -19,7 +19,7 @@ package main
import (
"os"
"github.com/cozystack/cozystack/pkg/cmd/server"
"github.com/aenix.io/cozystack/pkg/cmd/server"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/component-base/cli"
)


@@ -1,29 +0,0 @@
package main
import (
"flag"
"log"
"net/http"
"path/filepath"
)
func main() {
addr := flag.String("address", ":8123", "Address to listen on")
dir := flag.String("dir", "/cozystack/assets", "Directory to serve files from")
flag.Parse()
absDir, err := filepath.Abs(*dir)
if err != nil {
log.Fatalf("Error getting absolute path for %s: %v", *dir, err)
}
fs := http.FileServer(http.Dir(absDir))
http.Handle("/", fs)
log.Printf("Server starting on %s, serving directory %s", *addr, absDir)
err = http.ListenAndServe(*addr, nil)
if err != nil {
log.Fatalf("Server failed to start: %v", err)
}
}


@@ -1,219 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"flag"
"os"
"time"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
"github.com/cozystack/cozystack/internal/controller"
"github.com/cozystack/cozystack/internal/telemetry"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
var secureMetrics bool
var enableHTTP2 bool
var disableTelemetry bool
var telemetryEndpoint string
var telemetryInterval string
var cozystackVersion string
var tlsOpts []func(*tls.Config)
flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.BoolVar(&secureMetrics, "metrics-secure", true,
"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
"Disable telemetry collection")
flag.StringVar(&telemetryEndpoint, "telemetry-endpoint", "https://telemetry.cozystack.io",
"Endpoint for sending telemetry data")
flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
"Interval between telemetry data collection (e.g. 15m, 1h)")
flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
"Version of Cozystack")
opts := zap.Options{
Development: false,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
// Parse telemetry interval
interval, err := time.ParseDuration(telemetryInterval)
if err != nil {
setupLog.Error(err, "invalid telemetry interval")
os.Exit(1)
}
// Configure telemetry
telemetryConfig := telemetry.Config{
Disabled: disableTelemetry,
Endpoint: telemetryEndpoint,
Interval: interval,
CozystackVersion: cozystackVersion,
}
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
// if the enable-http2 flag is false (the default), http/2 should be disabled
// due to its vulnerabilities. More specifically, disabling http/2 will
// prevent from being vulnerable to the HTTP/2 Stream Cancellation and
// Rapid Reset CVEs. For more information see:
// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
// - https://github.com/advisories/GHSA-4374-p667-p6c8
disableHTTP2 := func(c *tls.Config) {
setupLog.Info("disabling http/2")
c.NextProtos = []string{"http/1.1"}
}
if !enableHTTP2 {
tlsOpts = append(tlsOpts, disableHTTP2)
}
webhookServer := webhook.NewServer(webhook.Options{
TLSOpts: tlsOpts,
})
// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
// More info:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
// - https://book.kubebuilder.io/reference/metrics.html
metricsServerOptions := metricsserver.Options{
BindAddress: metricsAddr,
SecureServing: secureMetrics,
TLSOpts: tlsOpts,
}
if secureMetrics {
// FilterProvider is used to protect the metrics endpoint with authn/authz.
// These configurations ensure that only authorized users and service accounts
// can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
// TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
// generate self-signed certificates for the metrics server. While convenient for development and testing,
// this setup is not recommended for production.
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Metrics: metricsServerOptions,
WebhookServer: webhookServer,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "19a0338c.cozystack.io",
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
// when the Manager ends. This requires the binary to immediately end when the
// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
// speeds up voluntary leader transitions as the new leader don't have to wait
// LeaseDuration time first.
//
// In the default scaffold provided, the program ends immediately after
// the manager stops, so would be fine to enable this option. However,
// if you are doing or is intended to do any operation such as perform cleanups
// after the manager stops then its usage might be unsafe.
// LeaderElectionReleaseOnCancel: true,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err = (&controller.WorkloadMonitorReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
os.Exit(1)
}
if err = (&controller.WorkloadReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Workload")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
// Initialize telemetry collector
collector, err := telemetry.NewCollector(mgr.GetClient(), &telemetryConfig, mgr.GetConfig())
if err != nil {
setupLog.V(1).Error(err, "unable to create telemetry collector, telemetry will be disabled")
}
if collector != nil {
if err := mgr.Add(collector); err != nil {
setupLog.Error(err, "unable to set up telemetry collector")
setupLog.V(1).Error(err, "unable to set up telemetry collector, continuing without telemetry")
}
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

8 file diffs suppressed because they are too large to display.

go.mod

@@ -1,27 +1,22 @@
// This is a generated file. Do not edit directly.
module github.com/cozystack/cozystack
module github.com/aenix.io/cozystack
go 1.23.0
require (
github.com/fluxcd/helm-controller/api v1.1.0
github.com/emicklei/go-restful/v3 v3.11.0
github.com/google/gofuzz v1.2.0
github.com/onsi/ginkgo/v2 v2.19.0
github.com/onsi/gomega v1.33.1
github.com/spf13/cobra v1.8.1
github.com/stretchr/testify v1.9.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.31.2
k8s.io/apiextensions-apiserver v0.31.2
k8s.io/apimachinery v0.31.2
k8s.io/apiserver v0.31.2
k8s.io/client-go v0.31.2
k8s.io/code-generator v0.31.2
k8s.io/component-base v0.31.2
k8s.io/klog/v2 v2.130.1
k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
sigs.k8s.io/controller-runtime v0.19.0
sigs.k8s.io/structured-merge-diff/v4 v4.4.1
)
@@ -36,27 +31,23 @@ require (
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fluxcd/helm-controller/api v1.1.0 // indirect
github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
github.com/fluxcd/pkg/apis/meta v1.6.1 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/cel-go v0.21.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
@@ -93,6 +84,7 @@ require (
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.8.0 // indirect
@@ -101,7 +93,6 @@ require (
golang.org/x/text v0.19.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/grpc v1.65.0 // indirect
@@ -109,9 +100,14 @@ require (
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.31.2 // indirect
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kms v0.31.2 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
sigs.k8s.io/controller-runtime v0.19.0 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

go.sum

@@ -26,10 +26,6 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fluxcd/helm-controller/api v1.1.0 h1:NS5Wm3U6Kv4w7Cw2sDOV++vf2ecGfFV00x1+2Y3QcOY=
@@ -218,6 +214,8 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -254,8 +252,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
@@ -291,8 +287,12 @@ k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
k8s.io/code-generator v0.31.2 h1:xLWxG0HEpMSHfcM//3u3Ro2Hmc6AyyLINQS//Z2GEOI=
k8s.io/code-generator v0.31.2/go.mod h1:eEQHXgBU/m7LDaToDoiz3t97dUUVyOblQdwOr8rivqc=
k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA=
k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.31.2 h1:pyx7l2qVOkClzFMIWMVF/FxsSkgd+OIGH7DecpbscJI=


@@ -1,5 +1,5 @@
/*
Copyright 2025 The Cozystack Authors.
Copyright 2024 The Cozystack Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -21,7 +21,7 @@ fix_d8() {
}
swap_pvc_overview() {
jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
}
deprectaed_remove_faq() {
@@ -68,7 +68,7 @@ modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/namespace/
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhost_detail.json
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhosts.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/control-plane-status.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd3.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/deprecated-resources.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/ntp.json #TODO
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/nodes.json
@@ -78,10 +78,6 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/pod.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespaces.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespace.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/capacity-planning/capacity-planning.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-control-plane.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
EOT
@@ -113,3 +109,4 @@ done <<\EOT
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
EOT

View File

@@ -60,7 +60,7 @@ done
# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
wget https://github.com/aenix-io/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
rm -f nocloud-amd64.raw
xz --decompress nocloud-amd64.raw.xz
fi
@@ -84,7 +84,7 @@ done
# Start VMs
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
-drive file=srv$i/system.img,if=virtio,format=raw \
-drive file=srv$i/seed.img,if=virtio,format=raw \
@@ -113,11 +113,8 @@ machine:
- usermode_helper=disabled
- name: zfs
- name: spl
registries:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
install:
image: ghcr.io/aenix-io/cozystack/talos:v1.8.3
files:
- content: |
[plugins]
@@ -145,9 +142,6 @@ EOT
cat > patch-controlplane.yaml <<\EOT
machine:
nodeLabels:
node.kubernetes.io/exclude-from-external-load-balancers:
$patch: delete
network:
interfaces:
- interface: eth0
@@ -234,7 +228,6 @@ sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
# Wait for Cluster-API providers
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
# Wait for linstor controller
@@ -303,9 +296,6 @@ spec:
avoidBuggyIPs: false
EOT
# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
"host": "example.org",
"ingress": true,
@@ -318,12 +308,7 @@ kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"s
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
# Wait for HelmReleases be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root
kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
"dashboard": true
@@ -338,7 +323,7 @@ kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root
# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
# Wait for grafana

View File

@@ -1,13 +1,12 @@
#!/bin/sh
set -e
file=versions_map
charts=$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")')
# <chart> <version> <commit>
new_map=$(
for chart in $charts; do
awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' "$chart/Chart.yaml"
awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' $chart/Chart.yaml
done
)
@@ -16,46 +15,47 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
exit 0
fi
miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
# search across all tags sorted by version
search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
miss_map=$(echo "$new_map" | awk 'NR==FNR { new_map[$1 " " $2] = $3; next } { if (!($1 " " $2 in new_map)) print $1, $2, $3}' - $file)
resolved_miss_map=$(
echo "$miss_map" | while read -r chart version commit; do
# if version is found in HEAD, it's HEAD
if [ $(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml) = "${version}" ]; then
echo "$chart $version HEAD"
continue
fi
# if commit is not HEAD, check if it's valid
if [ $commit != "HEAD" ]; then
if [ $(git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') != "${version}" ]; then
echo "Commit $commit for $chart $version is not valid" >&2
exit 1
echo "$miss_map" | while read chart version commit; do
if [ "$commit" = HEAD ]; then
line=$(awk '/^version:/ {print NR; exit}' "./$chart/Chart.yaml")
change_commit=$(git --no-pager blame -L"$line",+1 -- "$chart/Chart.yaml" | awk '{print $1}')
if [ "$change_commit" = "00000000" ]; then
# Not committed yet, use previous commit
line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
if [ $(echo $commit | cut -c1) = "^" ]; then
# Previous commit does not exist
commit=$(echo $commit | cut -c2-)
fi
else
# Committed, but version_map wasn't updated
line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
if [ $(echo $change_commit | cut -c1) = "^" ]; then
# Previous commit does not exist
commit=$(echo $change_commit | cut -c2-)
else
commit=$(git describe --always "$change_commit~1")
fi
fi
commit=$(git rev-parse --short "$commit")
echo "$chart $version $commit"
continue
fi
# if commit is HEAD, but version is not found in HEAD, check all tags
found_tag=""
for tag in $search_commits; do
if [ $(git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') = "${version}" ]; then
found_tag=$(git rev-parse --short "${tag}")
break
# Check if the commit belongs to the main branch
if ! git merge-base --is-ancestor "$commit" main; then
# Find the closest parent commit that belongs to main
commit_in_main=$(git log --pretty=format:"%h" main -- "$chart" | head -n 1)
if [ -n "$commit_in_main" ]; then
commit="$commit_in_main"
else
# No valid commit found in main branch for $chart, skipping...
continue
fi
fi
done
if [ -z "$found_tag" ]; then
echo "Can't find $chart $version in any version tag, removing it" >&2
continue
fi
echo "$chart $version $found_tag"
echo "$chart $version $commit"
done
)
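For orientation, the versions_map file maintained by this script is a plain-text list in the `<chart> <version> <commit>` format noted in its header comment. A hypothetical excerpt (chart names and versions match this repository; the short hashes are invented for illustration):
clickhouse 0.6.1 29cdb16
clickhouse 0.7.0 HEAD
nginx-cache 0.3.1 a9ff2c1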

View File

@@ -22,7 +22,6 @@ SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${SCRIPT_ROOT}/api/api-rules"}"
UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS:-true}"
CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"
source "${CODEGEN_PKG}/kube_codegen.sh"
@@ -47,6 +46,3 @@ kube::codegen::gen_openapi \
${update_report:+"${update_report}"} \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
"${SCRIPT_ROOT}/pkg/apis"
$CONTROLLER_GEN object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
$CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:artifacts:config=packages/system/cozystack-controller/templates/crds

View File

@@ -1,11 +0,0 @@
#!/bin/bash
set -xe
version=${VERSION:-$(git describe --tags)}
gh release upload --clobber $version _out/assets/cozystack-installer.yaml
gh release upload --clobber $version _out/assets/metal-amd64.iso
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
gh release upload --clobber $version _out/assets/kernel-amd64
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz

View File

@@ -1,96 +0,0 @@
/*
Copyright 2025.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"path/filepath"
"runtime"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc
func TestControllers(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
ctx, cancel = context.WithCancel(context.TODO())
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
ErrorIfCRDPathMissing: true,
// The BinaryAssetsDirectory is only required if you want to run the tests directly
// without calling the makefile target test. If not set, it will look for the
// default path defined in controller-runtime, which is /usr/local/kubebuilder/.
// Note that you must have the required binaries set up under the bin directory to run
// the tests directly. When we run make test, they are set up and used automatically.
BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
}
var err error
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = cozystackiov1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
})
var _ = AfterSuite(func() {
By("tearing down the test environment")
cancel()
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

View File

@@ -1,87 +0,0 @@
package controller
import (
"context"
"strings"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)
// WorkloadReconciler reconciles a Workload object
type WorkloadReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
w := &cozyv1alpha1.Workload{}
err := r.Get(ctx, req.NamespacedName, w)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "Unable to fetch Workload")
return ctrl.Result{}, err
}
// it's being deleted, nothing to handle
if w.DeletionTimestamp != nil {
return ctrl.Result{}, nil
}
t := getMonitoredObject(w)
err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)
// found object, nothing to do
if err == nil {
return ctrl.Result{}, nil
}
// error getting object but not 404 -- requeue
if !apierrors.IsNotFound(err) {
logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
return ctrl.Result{}, err
}
err = r.Delete(ctx, w)
if err != nil {
logger.Error(err, "failed to delete workload")
}
return ctrl.Result{}, err
}
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
// Watch Workload objects
For(&cozyv1alpha1.Workload{}).
Complete(r)
}
func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
if strings.HasPrefix(w.Name, "pvc-") {
obj := &corev1.PersistentVolumeClaim{}
obj.Name = strings.TrimPrefix(w.Name, "pvc-")
obj.Namespace = w.Namespace
return obj
}
if strings.HasPrefix(w.Name, "svc-") {
obj := &corev1.Service{}
obj.Name = strings.TrimPrefix(w.Name, "svc-")
obj.Namespace = w.Namespace
return obj
}
obj := &corev1.Pod{}
obj.Name = w.Name
obj.Namespace = w.Namespace
return obj
}

View File

@@ -1,424 +0,0 @@
package controller
import (
"context"
"encoding/json"
"fmt"
"sort"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)
// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadMonitorReconciler struct {
client.Client
Scheme *runtime.Scheme
}
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch
// isServiceReady checks if the service has an external IP bound
func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
return len(svc.Status.LoadBalancer.Ingress) > 0
}
// isPVCReady checks if the PVC is bound
func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
return pvc.Status.Phase == corev1.ClaimBound
}
// isPodReady checks if the Pod is in the Ready condition.
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
for _, c := range pod.Status.Conditions {
if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
return true
}
}
return false
}
// updateOwnerReferences adds the given monitor as a new owner reference to the object if not already present.
// It then sorts the owner references to enforce a consistent order.
func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
// Retrieve current owner references
owners := obj.GetOwnerReferences()
// Check if current monitor is already in owner references
var alreadyOwned bool
for _, ownerRef := range owners {
if ownerRef.UID == monitor.GetUID() {
alreadyOwned = true
break
}
}
runtimeObj, ok := monitor.(runtime.Object)
if !ok {
return
}
gvk := runtimeObj.GetObjectKind().GroupVersionKind()
// If not already present, add new owner reference without controller flag
if !alreadyOwned {
newOwnerRef := metav1.OwnerReference{
APIVersion: gvk.GroupVersion().String(),
Kind: gvk.Kind,
Name: monitor.GetName(),
UID: monitor.GetUID(),
// Set Controller to false to avoid conflict as multiple controllers are not allowed
Controller: pointer.BoolPtr(false),
BlockOwnerDeletion: pointer.BoolPtr(true),
}
owners = append(owners, newOwnerRef)
}
// Sort owner references to enforce a consistent order by UID
sort.SliceStable(owners, func(i, j int) bool {
return owners[i].UID < owners[j].UID
})
// Update the owner references of the object
obj.SetOwnerReferences(owners)
}
// reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
ctx context.Context,
monitor *cozyv1alpha1.WorkloadMonitor,
svc corev1.Service,
) error {
logger := log.FromContext(ctx)
workload := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("svc-%s", svc.Name),
Namespace: svc.Namespace,
},
}
resources := make(map[string]resource.Quantity)
q := resource.MustParse("0")
for _, ing := range svc.Status.LoadBalancer.Ingress {
if ing.IP != "" {
q.Add(resource.MustParse("1"))
}
}
resources["public-ips"] = q
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), monitor)
workload.Labels = svc.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
workload.Status.Type = monitor.Spec.Type
workload.Status.Resources = resources
workload.Status.Operational = r.isServiceReady(&svc)
return nil
})
if err != nil {
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
return err
}
return nil
}
// reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
ctx context.Context,
monitor *cozyv1alpha1.WorkloadMonitor,
pvc corev1.PersistentVolumeClaim,
) error {
logger := log.FromContext(ctx)
workload := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pvc-%s", pvc.Name),
Namespace: pvc.Namespace,
},
}
resources := make(map[string]resource.Quantity)
for resourceName, resourceQuantity := range pvc.Status.Capacity {
resources[resourceName.String()] = resourceQuantity
}
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), monitor)
workload.Labels = pvc.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
workload.Status.Type = monitor.Spec.Type
workload.Status.Resources = resources
workload.Status.Operational = r.isPVCReady(&pvc)
return nil
})
if err != nil {
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
return err
}
return nil
}
// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
ctx context.Context,
monitor *cozyv1alpha1.WorkloadMonitor,
pod corev1.Pod,
) error {
logger := log.FromContext(ctx)
// Combine both init containers and normal containers to sum resources properly
combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
// totalResources will store the sum of all container resource limits
totalResources := make(map[string]resource.Quantity)
// Iterate over all containers to aggregate their Limits
for _, container := range combinedContainers {
for name, qty := range container.Resources.Limits {
if existing, exists := totalResources[name.String()]; exists {
existing.Add(qty)
totalResources[name.String()] = existing
} else {
totalResources[name.String()] = qty.DeepCopy()
}
}
}
// If annotation "workload.cozystack.io/resources" is present, parse and merge
if resourcesStr, ok := pod.Annotations["workload.cozystack.io/resources"]; ok {
annRes := map[string]string{}
if err := json.Unmarshal([]byte(resourcesStr), &annRes); err != nil {
logger.Error(err, "Failed to parse resources annotation", "pod", pod.Name)
} else {
for k, v := range annRes {
parsed, err := resource.ParseQuantity(v)
if err != nil {
logger.Error(err, "Failed to parse resource quantity from annotation", "key", k, "value", v)
continue
}
totalResources[k] = parsed
}
}
}
workload := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
},
}
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), monitor)
// Copy labels from the Pod if needed
workload.Labels = pod.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
workload.Status.Type = monitor.Spec.Type
workload.Status.Resources = totalResources
workload.Status.Operational = r.isPodReady(&pod)
return nil
})
if err != nil {
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
return err
}
return nil
}
// Reconcile is the main reconcile loop.
// 1. It reconciles WorkloadMonitor objects themselves (create/update/delete).
// 2. It also reconciles Pod events mapped to WorkloadMonitor via label selector.
func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
// Fetch the WorkloadMonitor object if it exists
monitor := &cozyv1alpha1.WorkloadMonitor{}
err := r.Get(ctx, req.NamespacedName, monitor)
if err != nil {
// If the resource is not found, it may be a Pod event (mapFunc).
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "Unable to fetch WorkloadMonitor")
return ctrl.Result{}, err
}
// List Pods that match the WorkloadMonitor's selector
podList := &corev1.PodList{}
if err := r.List(
ctx,
podList,
client.InNamespace(monitor.Namespace),
client.MatchingLabels(monitor.Spec.Selector),
); err != nil {
logger.Error(err, "Unable to list Pods for WorkloadMonitor", "monitor", monitor.Name)
return ctrl.Result{}, err
}
var observedReplicas, availableReplicas int32
// For each matching Pod, reconcile the corresponding Workload
for _, pod := range podList.Items {
observedReplicas++
if err := r.reconcilePodForMonitor(ctx, monitor, pod); err != nil {
logger.Error(err, "Failed to reconcile Workload for Pod", "pod", pod.Name)
continue
}
if r.isPodReady(&pod) {
availableReplicas++
}
}
pvcList := &corev1.PersistentVolumeClaimList{}
if err := r.List(
ctx,
pvcList,
client.InNamespace(monitor.Namespace),
client.MatchingLabels(monitor.Spec.Selector),
); err != nil {
logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
return ctrl.Result{}, err
}
for _, pvc := range pvcList.Items {
if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
continue
}
}
svcList := &corev1.ServiceList{}
if err := r.List(
ctx,
svcList,
client.InNamespace(monitor.Namespace),
client.MatchingLabels(monitor.Spec.Selector),
); err != nil {
logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
return ctrl.Result{}, err
}
for _, svc := range svcList.Items {
if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
continue
}
if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
continue
}
}
// Update WorkloadMonitor status based on observed pods
monitor.Status.ObservedReplicas = observedReplicas
monitor.Status.AvailableReplicas = availableReplicas
// Default to operational = true, but check MinReplicas if set
monitor.Status.Operational = pointer.Bool(true)
if monitor.Spec.MinReplicas != nil && availableReplicas < *monitor.Spec.MinReplicas {
monitor.Status.Operational = pointer.Bool(false)
}
// Update the WorkloadMonitor status in the cluster
if err := r.Status().Update(ctx, monitor); err != nil {
logger.Error(err, "Unable to update WorkloadMonitor status", "monitor", monitor.Name)
return ctrl.Result{}, err
}
// Return without requeue if we want purely event-driven reconciliations
return ctrl.Result{}, nil
}
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
// Watch WorkloadMonitor objects
For(&cozyv1alpha1.WorkloadMonitor{}).
// Also watch Pod objects and map them back to WorkloadMonitor if labels match
Watches(
&corev1.Pod{},
handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
).
// Watch PVCs as well
Watches(
&corev1.PersistentVolumeClaim{},
handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
).
// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
Owns(&cozyv1alpha1.Workload{}).
Complete(r)
}
func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
concrete, ok := obj.(T)
if !ok {
return nil
}
var monitorList cozyv1alpha1.WorkloadMonitorList
// List all WorkloadMonitors in the same namespace
if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
return nil
}
labels := concrete.GetLabels()
// Match each monitor's selector with the object's labels
var requests []reconcile.Request
for _, m := range monitorList.Items {
matches := true
for k, v := range m.Spec.Selector {
if labelVal, exists := labels[k]; !exists || labelVal != v {
matches = false
break
}
}
if matches {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: m.Namespace,
Name: m.Name,
},
})
}
}
return requests
}
}
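To illustrate how the pieces above fit together, here is a hypothetical manifest pair (names, namespace, labels, and quantities are invented for illustration): a WorkloadMonitor selects objects by label, mapObjectToMonitor enqueues the monitor when a matching Pod or PVC changes, and reconcilePodForMonitor merges the workload.cozystack.io/resources annotation on top of the summed container limits.
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: example-db
  namespace: tenant-example
spec:
  kind: clickhouse
  type: clickhouse
  minReplicas: 1
  selector:
    app: example-db
---
apiVersion: v1
kind: Pod
metadata:
  name: example-db-0
  namespace: tenant-example
  labels:
    app: example-db               # matches the monitor's selector, so this Pod is reconciled
  annotations:
    # parsed as JSON and merged over the aggregated container limits
    workload.cozystack.io/resources: '{"cpu": "2", "memory": "4Gi"}'
spec:
  containers:
  - name: clickhouse
    image: clickhouse/clickhouse-server:24.9.2.42
    resources:
      limits:
        cpu: "1"
        memory: 2Gi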

View File

@@ -1,292 +0,0 @@
package telemetry
import (
"bytes"
"context"
"fmt"
"net/http"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)
// Collector handles telemetry data collection and sending
type Collector struct {
client client.Client
discoveryClient discovery.DiscoveryInterface
config *Config
ticker *time.Ticker
stopCh chan struct{}
}
// NewCollector creates a new telemetry collector
func NewCollector(client client.Client, config *Config, kubeConfig *rest.Config) (*Collector, error) {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
if err != nil {
return nil, fmt.Errorf("failed to create discovery client: %w", err)
}
return &Collector{
client: client,
discoveryClient: discoveryClient,
config: config,
}, nil
}
// Start implements manager.Runnable
func (c *Collector) Start(ctx context.Context) error {
if c.config.Disabled {
return nil
}
c.ticker = time.NewTicker(c.config.Interval)
c.stopCh = make(chan struct{})
// Initial collection
c.collect(ctx)
for {
select {
case <-ctx.Done():
c.ticker.Stop()
close(c.stopCh)
return nil
case <-c.ticker.C:
c.collect(ctx)
}
}
}
// NeedLeaderElection implements manager.LeaderElectionRunnable
func (c *Collector) NeedLeaderElection() bool {
// Only run telemetry collector on the leader
return true
}
// Stop halts telemetry collection
func (c *Collector) Stop() {
close(c.stopCh)
}
// getSizeGroup returns the exponential size group for PVC
func getSizeGroup(size resource.Quantity) string {
gb := size.Value() / (1024 * 1024 * 1024)
switch {
case gb <= 1:
return "1Gi"
case gb <= 5:
return "5Gi"
case gb <= 10:
return "10Gi"
case gb <= 25:
return "25Gi"
case gb <= 50:
return "50Gi"
case gb <= 100:
return "100Gi"
case gb <= 250:
return "250Gi"
case gb <= 500:
return "500Gi"
case gb <= 1024:
return "1Ti"
case gb <= 2048:
return "2Ti"
case gb <= 5120:
return "5Ti"
default:
return "10Ti"
}
}
// collect gathers and sends telemetry data
func (c *Collector) collect(ctx context.Context) {
logger := log.FromContext(ctx).V(1)
// Get cluster ID from kube-system namespace
var kubeSystemNS corev1.Namespace
if err := c.client.Get(ctx, types.NamespacedName{Name: "kube-system"}, &kubeSystemNS); err != nil {
logger.Info(fmt.Sprintf("Failed to get kube-system namespace: %v", err))
return
}
clusterID := string(kubeSystemNS.UID)
var cozystackCM corev1.ConfigMap
if err := c.client.Get(ctx, types.NamespacedName{Namespace: "cozy-system", Name: "cozystack"}, &cozystackCM); err != nil {
logger.Info(fmt.Sprintf("Failed to get cozystack configmap in cozy-system namespace: %v", err))
return
}
oidcEnabled := cozystackCM.Data["oidc-enabled"]
bundle := cozystackCM.Data["bundle-name"]
bundleEnable := cozystackCM.Data["bundle-enable"]
bundleDisable := cozystackCM.Data["bundle-disable"]
// Get Kubernetes version from nodes
var nodeList corev1.NodeList
if err := c.client.List(ctx, &nodeList); err != nil {
logger.Info(fmt.Sprintf("Failed to list nodes: %v", err))
return
}
// Create metrics buffer
var metrics strings.Builder
// Add Cozystack info metric
if len(nodeList.Items) > 0 {
k8sVersion, _ := c.discoveryClient.ServerVersion()
metrics.WriteString(fmt.Sprintf(
"cozy_cluster_info{cozystack_version=\"%s\",kubernetes_version=\"%s\",oidc_enabled=\"%s\",bundle_name=\"%s\",bunde_enable=\"%s\",bunde_disable=\"%s\"} 1\n",
c.config.CozystackVersion,
k8sVersion,
oidcEnabled,
bundle,
bundleEnable,
bundleDisable,
))
}
// Collect node metrics
nodeOSCount := make(map[string]int)
for _, node := range nodeList.Items {
key := fmt.Sprintf("%s (%s)", node.Status.NodeInfo.OperatingSystem, node.Status.NodeInfo.OSImage)
nodeOSCount[key] = nodeOSCount[key] + 1
}
for osKey, count := range nodeOSCount {
metrics.WriteString(fmt.Sprintf(
"cozy_nodes_count{os=\"%s\",kernel=\"%s\"} %d\n",
osKey,
nodeList.Items[0].Status.NodeInfo.KernelVersion,
count,
))
}
// Collect LoadBalancer services metrics
var serviceList corev1.ServiceList
if err := c.client.List(ctx, &serviceList); err != nil {
logger.Info(fmt.Sprintf("Failed to list Services: %v", err))
} else {
lbCount := 0
for _, svc := range serviceList.Items {
if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
lbCount++
}
}
metrics.WriteString(fmt.Sprintf("cozy_loadbalancers_count %d\n", lbCount))
}
// Count tenant namespaces
var nsList corev1.NamespaceList
if err := c.client.List(ctx, &nsList); err != nil {
logger.Info(fmt.Sprintf("Failed to list Namespaces: %v", err))
} else {
tenantCount := 0
for _, ns := range nsList.Items {
if strings.HasPrefix(ns.Name, "tenant-") {
tenantCount++
}
}
metrics.WriteString(fmt.Sprintf("cozy_tenants_count %d\n", tenantCount))
}
// Collect PV metrics grouped by driver and size
var pvList corev1.PersistentVolumeList
if err := c.client.List(ctx, &pvList); err != nil {
logger.Info(fmt.Sprintf("Failed to list PVs: %v", err))
} else {
// Map to store counts by size and driver
pvMetrics := make(map[string]map[string]int)
for _, pv := range pvList.Items {
if capacity, ok := pv.Spec.Capacity[corev1.ResourceStorage]; ok {
sizeGroup := getSizeGroup(capacity)
// Get the CSI driver name
driver := "unknown"
if pv.Spec.CSI != nil {
driver = pv.Spec.CSI.Driver
} else if pv.Spec.HostPath != nil {
driver = "hostpath"
} else if pv.Spec.NFS != nil {
driver = "nfs"
}
// Initialize nested map if needed
if _, exists := pvMetrics[sizeGroup]; !exists {
pvMetrics[sizeGroup] = make(map[string]int)
}
// Increment count for this size/driver combination
pvMetrics[sizeGroup][driver]++
}
}
// Write metrics
for size, drivers := range pvMetrics {
for driver, count := range drivers {
metrics.WriteString(fmt.Sprintf(
"cozy_pvs_count{driver=\"%s\",size=\"%s\"} %d\n",
driver,
size,
count,
))
}
}
}
// Collect workload metrics
var monitorList cozyv1alpha1.WorkloadMonitorList
if err := c.client.List(ctx, &monitorList); err != nil {
logger.Info(fmt.Sprintf("Failed to list WorkloadMonitors: %v", err))
return
}
for _, monitor := range monitorList.Items {
metrics.WriteString(fmt.Sprintf(
"cozy_workloads_count{uid=\"%s\",kind=\"%s\",type=\"%s\",version=\"%s\"} %d\n",
monitor.UID,
monitor.Spec.Kind,
monitor.Spec.Type,
monitor.Spec.Version,
monitor.Status.ObservedReplicas,
))
}
// Send metrics
if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
}
}
// sendMetrics sends collected metrics to the configured endpoint
func (c *Collector) sendMetrics(clusterID, metrics string) error {
req, err := http.NewRequest("POST", c.config.Endpoint, bytes.NewBufferString(metrics))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "text/plain")
req.Header.Set("X-Cluster-ID", clusterID)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
return nil
}

View File

@@ -1,27 +0,0 @@
package telemetry
import (
"time"
)
// Config holds telemetry configuration
type Config struct {
// Disable telemetry collection if set to true
Disabled bool
// Endpoint to send telemetry data to
Endpoint string
// Interval between telemetry data collection
Interval time.Duration
// CozystackVersion represents the current version of Cozystack
CozystackVersion string
}
// DefaultConfig returns default telemetry configuration
func DefaultConfig() *Config {
return &Config{
Disabled: false,
Endpoint: "https://telemetry.cozystack.io",
Interval: 15 * time.Minute,
CozystackVersion: "unknown",
}
}

View File

@@ -0,0 +1,105 @@
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Namespace
metadata:
name: cozy-system
labels:
pod-security.kubernetes.io/enforce: privileged
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cozystack
namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cozystack
subjects:
- kind: ServiceAccount
name: cozystack
namespace: cozy-system
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Service
metadata:
name: cozystack
namespace: cozy-system
spec:
ports:
- name: http
port: 80
targetPort: 8123
selector:
app: cozystack
type: ClusterIP
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cozystack
namespace: cozy-system
spec:
replicas: 1
selector:
matchLabels:
app: cozystack
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app: cozystack
spec:
hostNetwork: true
serviceAccountName: cozystack
containers:
- name: cozystack
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.20.2"
env:
- name: KUBERNETES_SERVICE_HOST
value: localhost
- name: KUBERNETES_SERVICE_PORT
value: "7445"
- name: K8S_AWAIT_ELECTION_ENABLED
value: "1"
- name: K8S_AWAIT_ELECTION_NAME
value: cozystack
- name: K8S_AWAIT_ELECTION_LOCK_NAME
value: cozystack
- name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE
value: cozy-system
- name: K8S_AWAIT_ELECTION_IDENTITY
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: darkhttpd
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.20.2"
command:
- /usr/bin/darkhttpd
- /cozystack/assets
- --port
- "8123"
ports:
- name: http
containerPort: 8123
tolerations:
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoSchedule"
- key: "node.cilium.io/agent-not-ready"
operator: "Exists"
effect: "NoSchedule"

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.6.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -14,7 +14,6 @@ image:
--cache-to type=inline \
--metadata-file images/clickhouse-backup.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG))@$$(yq e '."containerimage.digest"' images/clickhouse-backup.json -o json -r)" \
> images/clickhouse-backup.tag

View File

@@ -36,15 +36,13 @@ more details:
### Backup parameters
| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
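As a sketch of how these parameters are typically supplied (the bucket, keys, and password below are the chart's documented example defaults, not real credentials), a values override enabling backups might look like:
backup:
  enabled: true
  s3Region: us-east-1
  s3Bucket: s3.example.org/clickhouse-backups
  schedule: "0 2 * * *"
  cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0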

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:dda84420cb8648721299221268a00d72a05c7af5b7fb452619bac727068b9e61

View File

@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}

View File

@@ -121,11 +121,6 @@ spec:
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:24.9.2.42
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 16 }}
{{- else if ne .Values.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
{{- end }}
volumeMounts:
- name: data-volume-template
mountPath: /var/lib/clickhouse

View File

@@ -17,10 +17,3 @@ rules:
resourceNames:
- {{ .Release.Name }}-credentials
verbs: ["get", "list", "watch"]
- apiGroups:
- cozystack.io
resources:
- workloadmonitors
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]

View File

@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}
spec:
replicas: {{ .Values.replicas }}
minReplicas: 1
kind: clickhouse
type: clickhouse
selector:
clickhouse.altinity.com/chi: {{ $.Release.Name }}
version: {{ $.Chart.Version }}

View File

@@ -76,16 +76,6 @@
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
}

View File

@@ -46,16 +46,3 @@ backup:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
## @param resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.4.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -21,17 +21,15 @@
### Backup parameters
| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/aenix-io/cozystack/postgres-backup:0.7.1@sha256:406d2c5a30fa8b6fe10eab3cba45c06fea3876e81fd123ead6dc3c19347762d0

View File

@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}

View File

@@ -17,10 +17,3 @@ rules:
resourceNames:
- {{ .Release.Name }}-credentials
verbs: ["get", "list", "watch"]
- apiGroups:
- cozystack.io
resources:
- workloadmonitors
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]

View File

@@ -6,20 +6,10 @@ metadata:
spec:
instances: {{ .Values.replicas }}
enableSuperuserAccess: true
{{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
{{- if $configMap }}
{{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
{{- if $rawConstraints }}
{{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
{{- end }}
{{- end }}
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 4 }}
{{- else if ne .Values.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
{{- end }}
monitoring:
enablePodMonitor: true

View File

@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}
spec:
replicas: {{ .Values.replicas }}
minReplicas: 1
kind: ferretdb
type: ferretdb
selector:
app: {{ $.Release.Name }}
version: {{ $.Chart.Version }}

View File

@@ -81,16 +81,6 @@
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
}

View File

@@ -48,16 +48,3 @@ backup:
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
## @param resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.3.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -13,7 +13,6 @@ image-nginx:
--cache-to type=inline \
--metadata-file images/nginx-cache.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))@$$(yq e '."containerimage.digest"' images/nginx-cache.json -o json -r)" \
> images/nginx-cache.tag

View File

@@ -60,17 +60,13 @@ VTS module shows wrong upstream response time
### Common parameters
| Name | Description | Value |
| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
| `haproxy.resources` | Resources | `{}` |
| `haproxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `nginx.resources` | Resources | `{}` |
| `nginx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ------------------ | ----------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
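A minimal values override for these common parameters could look like the sketch below (the storageClass name and resource figures are illustrative; `resources`/`resourcesPreset` only exist in the newer chart revision shown in the wider table above):
external: false
size: 10Gi
storageClass: ""
haproxy:
  replicas: 2
  resourcesPreset: micro
nginx:
  replicas: 2
  resources:
    limits:
      cpu: 1000m
      memory: 1Gi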
### Configuration parameters

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:27112d470a31725b75b29b29919af06b4ce1339e3b502b08889a92ab7099adde

View File

@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}

View File

@@ -33,11 +33,6 @@ spec:
containers:
- image: haproxy:latest
name: haproxy
{{- if .Values.haproxy.resources }}
resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
{{- else if ne .Values.haproxy.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
{{- end }}
ports:
- containerPort: 8080
name: http

View File

@@ -52,11 +52,6 @@ spec:
shareProcessNamespace: true
containers:
- name: nginx
{{- if $.Values.nginx.resources }}
resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
{{- else if ne $.Values.nginx.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
{{- end }}
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
readinessProbe:
httpGet:
@@ -88,13 +83,6 @@ spec:
- name: reloader
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
command: ["/usr/bin/nginx-reloader.sh"]
resources:
limits:
cpu: 50m
memory: 50Mi
requests:
cpu: 50m
memory: 50Mi
#command: ["sleep", "infinity"]
volumeMounts:
- mountPath: /etc/nginx/nginx.conf

View File

@@ -24,16 +24,6 @@
"type": "number",
"description": "Number of HAProxy replicas",
"default": 2
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
},
@@ -44,16 +34,6 @@
"type": "number",
"description": "Number of Nginx replicas",
"default": 2
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
},

View File

@@ -12,32 +12,8 @@ size: 10Gi
storageClass: ""
haproxy:
replicas: 2
## @param haproxy.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param haproxy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
nginx:
replicas: 2
## @param nginx.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
## @section Configuration parameters

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.3.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -4,19 +4,15 @@
### Common parameters
| Name | Description | Value |
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
| `kafka.resources` | Resources | `{}` |
| `kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `zookeeper.resources` | Resources | `{}` |
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| Name | Description | Value |
| ------------------------ | ----------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
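Purely as an illustration (not part of the README diff), the parameters above map onto a values override along these lines in the chart revision that exposes the resources knobs; here the Kafka pods use a preset while ZooKeeper gets an explicit `resources` block, which overrides its preset:

```yaml
# Illustrative values override; parameter names taken from the tables above.
external: false
kafka:
  size: 10Gi
  replicas: 3
  storageClass: ""
  resourcesPreset: "small"   # ignored if an explicit resources block is provided
zookeeper:
  size: 5Gi
  replicas: 3
  storageClass: ""
  resources:
    requests:
      cpu: 250m
      memory: 512Mi
    limits:
      cpu: 500m
      memory: 1Gi
```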
### Configuration parameters

View File

@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}

View File

@@ -17,11 +17,3 @@ rules:
resourceNames:
- {{ .Release.Name }}-clients-ca
verbs: ["get", "list", "watch"]
- apiGroups:
- cozystack.io
resources:
- workloadmonitors
resourceNames:
- {{ .Release.Name }}
- {{ $.Release.Name }}-zookeeper
verbs: ["get", "list", "watch"]

View File

@@ -8,11 +8,6 @@ metadata:
spec:
kafka:
replicas: {{ .Values.kafka.replicas }}
{{- if .Values.kafka.resources }}
resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
{{- else if ne .Values.kafka.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
listeners:
- name: plain
port: 9092
@@ -62,19 +57,8 @@ spec:
class: {{ . }}
{{- end }}
deleteClaim: true
metricsConfig:
type: jmxPrometheusExporter
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-metrics
key: kafka-metrics-config.yml
zookeeper:
replicas: {{ .Values.zookeeper.replicas }}
{{- if .Values.zookeeper.resources }}
resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
{{- else if ne .Values.zookeeper.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
storage:
type: persistent-claim
{{- with .Values.zookeeper.size }}
@@ -84,12 +68,6 @@ spec:
class: {{ . }}
{{- end }}
deleteClaim: false
metricsConfig:
type: jmxPrometheusExporter
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-metrics
key: kafka-metrics-config.yml
entityOperator:
topicOperator: {}
userOperator: {}

View File

@@ -1,198 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: {{ .Release.Name }}-metrics
data:
kafka-metrics-config.yml: |
# See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
lowercaseOutputName: true
rules:
# Special cases and very specific rules
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
topic: "$4"
partition: "$5"
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
broker: "$4:$5"
- pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_tls_info
type: GAUGE
labels:
cipher: "$2"
protocol: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_software
type: GAUGE
labels:
clientSoftwareName: "$2"
clientSoftwareVersion: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
name: kafka_server_$1_$4
type: COUNTER
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
name: kafka_server_$1_$4
type: COUNTER
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
# Some percent metrics use MeanRate attribute
# Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
name: kafka_$1_$2_$3_percent
type: GAUGE
# Generic gauges for percents
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
labels:
"$4": "$5"
# Generic per-second counters with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
# Generic gauges with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
# Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
# Note that these are missing the '_sum' metric!
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
quantile: "0.$8"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
quantile: "0.$6"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
quantile: "0.$4"
# KRaft overall related metrics
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
- pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
name: kafka_server_raftmetrics_$1
type: COUNTER
- pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
name: kafka_server_raftmetrics_$1
value: 1
type: UNTYPED
labels:
$1: "$2"
- pattern: "kafka.server<type=raft-metrics><>(.+):"
name: kafka_server_raftmetrics_$1
type: GAUGE
# KRaft "low level" channels related metrics
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
- pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
name: kafka_server_raftchannelmetrics_$1
type: COUNTER
- pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
name: kafka_server_raftchannelmetrics_$1
type: GAUGE
# Broker metrics related to fetching metadata topic records in KRaft mode
- pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
name: kafka_server_brokermetadatametrics_$1
type: GAUGE
zookeeper-metrics-config.yml: |
# See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
lowercaseOutputName: true
rules:
# replicated Zookeeper
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
name: "zookeeper_$2"
type: GAUGE
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
name: "zookeeper_$3"
type: GAUGE
labels:
replicaId: "$2"
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)"
name: "zookeeper_$4"
type: COUNTER
labels:
replicaId: "$2"
memberType: "$3"
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
name: "zookeeper_$4"
type: GAUGE
labels:
replicaId: "$2"
memberType: "$3"
- pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
name: "zookeeper_$4_$5"
type: GAUGE
labels:
replicaId: "$2"
memberType: "$3"

View File

@@ -1,40 +0,0 @@
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
name: {{ .Release.Name }}
spec:
podMetricsEndpoints:
- port: tcp-prometheus
scheme: http
relabelConfigs:
- separator: ;
regex: __meta_kubernetes_pod_label_(strimzi_io_.+)
replacement: $1
action: labelmap
- sourceLabels: [__meta_kubernetes_namespace]
separator: ;
regex: (.*)
targetLabel: namespace
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_name]
separator: ;
regex: (.*)
targetLabel: pod
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_node_name]
separator: ;
regex: (.*)
targetLabel: node
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_host_ip]
separator: ;
regex: (.*)
targetLabel: node_ip
replacement: $1
action: replace
selector:
matchLabels:
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -1,30 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}
spec:
replicas: {{ .Values.replicas }}
minReplicas: 1
kind: kafka
type: kafka
selector:
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/name: kafka
version: {{ $.Chart.Version }}
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}-zookeeper
spec:
replicas: {{ .Values.replicas }}
minReplicas: 1
kind: kafka
type: zookeeper
selector:
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/name: zookeeper
version: {{ $.Chart.Version }}

View File

@@ -24,16 +24,6 @@
"type": "string",
"description": "StorageClass used to store the Kafka data",
"default": ""
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
},
@@ -54,16 +44,6 @@
"type": "string",
"description": "StorageClass used to store the ZooKeeper data",
"default": ""
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "nano"
}
}
},

View File

@@ -14,35 +14,10 @@ kafka:
size: 10Gi
replicas: 3
storageClass: ""
## @param kafka.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
zookeeper:
size: 5Gi
replicas: 3
storageClass: ""
## @param zookeeper.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
## @section Configuration parameters

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.18.0
version: 0.14.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -18,7 +18,6 @@ image-ubuntu-container-disk:
--cache-to type=inline \
--metadata-file images/ubuntu-container-disk.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
> images/ubuntu-container-disk.tag
@@ -33,7 +32,6 @@ image-kubevirt-cloud-provider:
--cache-to type=inline \
--metadata-file images/kubevirt-cloud-provider.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-cloud-provider.json -o json -r)" \
> images/kubevirt-cloud-provider.tag
@@ -48,7 +46,6 @@ image-kubevirt-csi-driver:
--cache-to type=inline \
--metadata-file images/kubevirt-csi-driver.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-csi-driver.json -o json -r)" \
> images/kubevirt-csi-driver.tag
@@ -64,7 +61,6 @@ image-cluster-autoscaler:
--cache-to type=inline \
--metadata-file images/cluster-autoscaler.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/cluster-autoscaler.json -o json -r)" \
> images/cluster-autoscaler.tag

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.14.1@sha256:b63293bc295e8c04574900bb711ebfe51db6774beb6bc3a58791562ec11b406b

View File

@@ -1,14 +1,12 @@
# Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64
ARG builder_image=docker.io/library/golang:1.23.4
ARG builder_image=docker.io/library/golang:1.22.5
ARG BASEIMAGE=gcr.io/distroless/static:nonroot-amd64
FROM ${builder_image} AS builder
RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \
&& cd /src/autoscaler/cluster-autoscaler \
&& git checkout cluster-autoscaler-1.32.0
&& git checkout cluster-autoscaler-1.31.0
WORKDIR /src/autoscaler/cluster-autoscaler
COPY fix-downscale.diff /fix-downscale.diff
RUN git apply /fix-downscale.diff
RUN make build
FROM $BASEIMAGE

View File

@@ -1,13 +0,0 @@
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
index 4eec0e4bf..f28fd9241 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
@@ -106,8 +106,6 @@ func (r unstructuredScalableResource) Replicas() (int, error) {
func (r unstructuredScalableResource) SetSize(nreplicas int) error {
switch {
- case nreplicas > r.maxSize:
- return fmt.Errorf("size increase too large - desired:%d max:%d", nreplicas, r.maxSize)
case nreplicas < r.minSize:
return fmt.Errorf("size decrease too large - desired:%d min:%d", nreplicas, r.minSize)
}

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.14.1@sha256:c0561a342e6b55d066f3363182f442e8fa30a0b6b448d89d15a1a855c999b98e

View File

@@ -3,11 +3,12 @@ FROM --platform=linux/amd64 golang:1.20.6 AS builder
RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
&& cd /go/src/kubevirt.io/cloud-provider-kubevirt \
&& git checkout 443a1fe
&& git checkout da9e0cf
WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt
# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/335
# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/336
ADD patches /patches
RUN git apply /patches/*.diff
RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'

View File

@@ -0,0 +1,20 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..95c31438 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -412,11 +412,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
// Create the desired port configuration
var desiredPorts []discovery.EndpointPort
- for _, port := range service.Spec.Ports {
+ for i := range service.Spec.Ports {
desiredPorts = append(desiredPorts, discovery.EndpointPort{
- Port: &port.TargetPort.IntVal,
- Protocol: &port.Protocol,
- Name: &port.Name,
+ Port: &service.Spec.Ports[i].TargetPort.IntVal,
+ Protocol: &service.Spec.Ports[i].Protocol,
+ Name: &service.Spec.Ports[i].Name,
})
}

View File

@@ -0,0 +1,129 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..6f6e3d32 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -108,32 +108,24 @@ func newRequest(reqType ReqType, obj interface{}, oldObj interface{}) *Request {
}
func (c *Controller) Init() error {
-
- // Act on events from Services on the infra cluster. These are created by the EnsureLoadBalancer function.
- // We need to watch for these events so that we can update the EndpointSlices in the infra cluster accordingly.
+ // Existing Service event handlers...
_, err := c.infraFactory.Core().V1().Services().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
- // cast obj to Service
svc := obj.(*v1.Service)
- // Only act on Services of type LoadBalancer
if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
klog.Infof("Service added: %v/%v", svc.Namespace, svc.Name)
c.queue.Add(newRequest(AddReq, obj, nil))
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
- // cast obj to Service
newSvc := newObj.(*v1.Service)
- // Only act on Services of type LoadBalancer
if newSvc.Spec.Type == v1.ServiceTypeLoadBalancer {
klog.Infof("Service updated: %v/%v", newSvc.Namespace, newSvc.Name)
c.queue.Add(newRequest(UpdateReq, newObj, oldObj))
}
},
DeleteFunc: func(obj interface{}) {
- // cast obj to Service
svc := obj.(*v1.Service)
- // Only act on Services of type LoadBalancer
if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
klog.Infof("Service deleted: %v/%v", svc.Namespace, svc.Name)
c.queue.Add(newRequest(DeleteReq, obj, nil))
@@ -144,7 +136,7 @@ func (c *Controller) Init() error {
return err
}
- // Monitor endpoint slices that we are interested in based on known services in the infra cluster
+ // Existing EndpointSlice event handlers in tenant cluster...
_, err = c.tenantFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
eps := obj.(*discovery.EndpointSlice)
@@ -194,10 +186,80 @@ func (c *Controller) Init() error {
return err
}
- //TODO: Add informer for EndpointSlices in the infra cluster to watch for (unwanted) changes
+ // Add an informer for EndpointSlices in the infra cluster
+ _, err = c.infraFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ eps := obj.(*discovery.EndpointSlice)
+ if c.managedByController(eps) {
+ svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+ if svcErr != nil {
+ klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+ return
+ }
+ if svc != nil {
+ klog.Infof("Infra EndpointSlice added: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+ c.queue.Add(newRequest(AddReq, svc, nil))
+ }
+ }
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ eps := newObj.(*discovery.EndpointSlice)
+ if c.managedByController(eps) {
+ svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+ if svcErr != nil {
+ klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+ return
+ }
+ if svc != nil {
+ klog.Infof("Infra EndpointSlice updated: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+ c.queue.Add(newRequest(UpdateReq, svc, nil))
+ }
+ }
+ },
+ DeleteFunc: func(obj interface{}) {
+ eps := obj.(*discovery.EndpointSlice)
+ if c.managedByController(eps) {
+ svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+ if svcErr != nil {
+ klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s on delete: %v", eps.Namespace, eps.Name, svcErr)
+ return
+ }
+ if svc != nil {
+ klog.Infof("Infra EndpointSlice deleted: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+ c.queue.Add(newRequest(DeleteReq, svc, nil))
+ }
+ }
+ },
+ })
+ if err != nil {
+ return err
+ }
+
return nil
}
+// getInfraServiceForEPS returns the Service in the infra cluster associated with the given EndpointSlice.
+// It does this by reading the "kubernetes.io/service-name" label from the EndpointSlice, which should correspond
+// to the Service name. If not found or if the Service doesn't exist, it returns nil.
+func (c *Controller) getInfraServiceForEPS(ctx context.Context, eps *discovery.EndpointSlice) (*v1.Service, error) {
+ svcName := eps.Labels[discovery.LabelServiceName]
+ if svcName == "" {
+ // No service name label found, can't determine infra service.
+ return nil, nil
+ }
+
+ svc, err := c.infraClient.CoreV1().Services(c.infraNamespace).Get(ctx, svcName, metav1.GetOptions{})
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ // Service doesn't exist
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ return svc, nil
+}
+
// Run starts an asynchronous loop that monitors and updates GKENetworkParamSet in the cluster.
func (c *Controller) Run(numWorkers int, stopCh <-chan struct{}, controllerManagerMetrics *controllersmetrics.ControllerManagerMetrics) {
defer utilruntime.HandleCrash()

View File

@@ -1,689 +0,0 @@
diff --git a/.golangci.yml b/.golangci.yml
index cf72a41a2..1c9237e83 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -122,3 +122,9 @@ linters:
# - testpackage
# - revive
# - wsl
+issues:
+ exclude-rules:
+ - filename: "kubevirteps_controller_test.go"
+ linters:
+ - govet
+ text: "declaration of \"err\" shadows"
diff --git a/cmd/kubevirt-cloud-controller-manager/kubevirteps.go b/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
index 74166b5d9..4e744f8de 100644
--- a/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
+++ b/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
@@ -101,7 +101,18 @@ func startKubevirtCloudController(
klog.Infof("Setting up kubevirtEPSController")
- kubevirtEPSController := kubevirteps.NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, kubevirtCloud.Namespace())
+ clusterName := ccmConfig.ComponentConfig.KubeCloudShared.ClusterName
+ if clusterName == "" {
+ klog.Fatalf("Required flag --cluster-name is missing")
+ }
+
+ kubevirtEPSController := kubevirteps.NewKubevirtEPSController(
+ tenantClient,
+ infraClient,
+ infraDynamic,
+ kubevirtCloud.Namespace(),
+ clusterName,
+ )
klog.Infof("Initializing kubevirtEPSController")
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 6f6e3d322..b56882c12 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -54,10 +54,10 @@ type Controller struct {
infraDynamic dynamic.Interface
infraFactory informers.SharedInformerFactory
- infraNamespace string
- queue workqueue.RateLimitingInterface
- maxRetries int
-
+ infraNamespace string
+ clusterName string
+ queue workqueue.RateLimitingInterface
+ maxRetries int
maxEndPointsPerSlice int
}
@@ -65,8 +65,9 @@ func NewKubevirtEPSController(
tenantClient kubernetes.Interface,
infraClient kubernetes.Interface,
infraDynamic dynamic.Interface,
- infraNamespace string) *Controller {
-
+ infraNamespace string,
+ clusterName string,
+) *Controller {
tenantFactory := informers.NewSharedInformerFactory(tenantClient, 0)
infraFactory := informers.NewSharedInformerFactoryWithOptions(infraClient, 0, informers.WithNamespace(infraNamespace))
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
@@ -79,6 +80,7 @@ func NewKubevirtEPSController(
infraDynamic: infraDynamic,
infraFactory: infraFactory,
infraNamespace: infraNamespace,
+ clusterName: clusterName,
queue: queue,
maxRetries: 25,
maxEndPointsPerSlice: 100,
@@ -320,22 +322,30 @@ func (c *Controller) processNextItem(ctx context.Context) bool {
// getInfraServiceFromTenantEPS returns the Service in the infra cluster that is associated with the given tenant endpoint slice.
func (c *Controller) getInfraServiceFromTenantEPS(ctx context.Context, slice *discovery.EndpointSlice) (*v1.Service, error) {
- infraServices, err := c.infraClient.CoreV1().Services(c.infraNamespace).List(ctx,
- metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s,%s=%s", kubevirt.TenantServiceNameLabelKey, slice.Labels["kubernetes.io/service-name"],
- kubevirt.TenantServiceNamespaceLabelKey, slice.Namespace)})
+ tenantServiceName := slice.Labels[discovery.LabelServiceName]
+ tenantServiceNamespace := slice.Namespace
+
+ labelSelector := fmt.Sprintf(
+ "%s=%s,%s=%s,%s=%s",
+ kubevirt.TenantServiceNameLabelKey, tenantServiceName,
+ kubevirt.TenantServiceNamespaceLabelKey, tenantServiceNamespace,
+ kubevirt.TenantClusterNameLabelKey, c.clusterName,
+ )
+
+ svcList, err := c.infraClient.CoreV1().Services(c.infraNamespace).List(ctx, metav1.ListOptions{
+ LabelSelector: labelSelector,
+ })
if err != nil {
- klog.Errorf("Failed to get Service in Infra for EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+ klog.Errorf("Failed to get Service in Infra for EndpointSlice %s in namespace %s: %v", slice.Name, tenantServiceNamespace, err)
return nil, err
}
- if len(infraServices.Items) > 1 {
- // This should never be possible, only one service should exist for a given tenant endpoint slice
- klog.Errorf("Multiple services found for tenant endpoint slice %s in namespace %s", slice.Name, slice.Namespace)
+ if len(svcList.Items) > 1 {
+ klog.Errorf("Multiple services found for tenant endpoint slice %s in namespace %s", slice.Name, tenantServiceNamespace)
return nil, errors.New("multiple services found for tenant endpoint slice")
}
- if len(infraServices.Items) == 1 {
- return &infraServices.Items[0], nil
+ if len(svcList.Items) == 1 {
+ return &svcList.Items[0], nil
}
- // No service found, possible if service is deleted.
return nil, nil
}
@@ -363,16 +373,27 @@ func (c *Controller) getTenantEPSFromInfraService(ctx context.Context, svc *v1.S
// getInfraEPSFromInfraService returns the EndpointSlices in the infra cluster that are associated with the given infra service.
func (c *Controller) getInfraEPSFromInfraService(ctx context.Context, svc *v1.Service) ([]*discovery.EndpointSlice, error) {
var infraEPSSlices []*discovery.EndpointSlice
- klog.Infof("Searching for endpoints on infra cluster for service %s in namespace %s.", svc.Name, svc.Namespace)
- result, err := c.infraClient.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx,
- metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discovery.LabelServiceName, svc.Name)})
+
+ klog.Infof("Searching for EndpointSlices in infra cluster for service %s/%s", svc.Namespace, svc.Name)
+
+ labelSelector := fmt.Sprintf(
+ "%s=%s,%s=%s",
+ discovery.LabelServiceName, svc.Name,
+ kubevirt.TenantClusterNameLabelKey, c.clusterName,
+ )
+
+ result, err := c.infraClient.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx, metav1.ListOptions{
+ LabelSelector: labelSelector,
+ })
if err != nil {
klog.Errorf("Failed to get EndpointSlices for Service %s in namespace %s: %v", svc.Name, svc.Namespace, err)
return nil, err
}
+
for _, eps := range result.Items {
infraEPSSlices = append(infraEPSSlices, &eps)
}
+
return infraEPSSlices, nil
}
@@ -382,74 +403,117 @@ func (c *Controller) reconcile(ctx context.Context, r *Request) error {
return errors.New("could not cast object to service")
}
+ // Skip services not managed by this controller (missing required labels)
if service.Labels[kubevirt.TenantServiceNameLabelKey] == "" ||
service.Labels[kubevirt.TenantServiceNamespaceLabelKey] == "" ||
service.Labels[kubevirt.TenantClusterNameLabelKey] == "" {
- klog.Infof("This LoadBalancer Service: %s is not managed by the %s. Skipping.", service.Name, ControllerName)
+ klog.Infof("Service %s is not managed by this controller. Skipping.", service.Name)
+ return nil
+ }
+
+ // Skip services for other clusters
+ if service.Labels[kubevirt.TenantClusterNameLabelKey] != c.clusterName {
+ klog.Infof("Skipping Service %s: cluster label %q doesn't match our clusterName %q", service.Name, service.Labels[kubevirt.TenantClusterNameLabelKey], c.clusterName)
return nil
}
+
klog.Infof("Reconciling: %v", service.Name)
+ /*
+ 1) Check if Service in the infra cluster is actually present.
+ If it's not found, mark it as 'deleted' so that we don't create new slices.
+ */
serviceDeleted := false
- svc, err := c.infraFactory.Core().V1().Services().Lister().Services(c.infraNamespace).Get(service.Name)
+ infraSvc, err := c.infraFactory.Core().V1().Services().Lister().Services(c.infraNamespace).Get(service.Name)
if err != nil {
- klog.Infof("Service %s in namespace %s is deleted.", service.Name, service.Namespace)
+ // The Service is not present in the infra lister => treat as deleted
+ klog.Infof("Service %s in namespace %s is deleted (or not found).", service.Name, service.Namespace)
serviceDeleted = true
} else {
- service = svc
+ // Use the actual object from the lister, so we have the latest state
+ service = infraSvc
}
+ /*
+ 2) Get all existing EndpointSlices in the infra cluster that belong to this LB Service.
+ We'll decide which of them should be updated or deleted.
+ */
infraExistingEpSlices, err := c.getInfraEPSFromInfraService(ctx, service)
if err != nil {
return err
}
- // At this point we have the current state of the 3 main objects we are interested in:
- // 1. The Service in the infra cluster, the one created by the KubevirtCloudController.
- // 2. The EndpointSlices in the tenant cluster, created for the tenant cluster's Service.
- // 3. The EndpointSlices in the infra cluster, managed by this controller.
-
slicesToDelete := []*discovery.EndpointSlice{}
slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice)
+ // For example, if the service is single-stack IPv4 => only AddressTypeIPv4
+ // or if dual-stack => IPv4 and IPv6, etc.
serviceSupportedAddressesTypes := getAddressTypesForService(service)
- // If the services switched to a different address type, we need to delete the old ones, because it's immutable.
- // If the services switched to a different externalTrafficPolicy, we need to delete the old ones.
+
+ /*
+ 3) Determine which slices to delete, and which to pass on to the normal
+ "reconcileByAddressType" logic.
+
+ - If 'serviceDeleted' is true OR service.Spec.Selector != nil, we remove them.
+ - Also, if the slice's address type is unsupported by the Service, we remove it.
+ */
for _, eps := range infraExistingEpSlices {
- if service.Spec.Selector != nil || serviceDeleted {
- klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has a selector", eps.Name, eps.Namespace)
- // to be sure we don't delete any slice that is not managed by us
+ // If service is deleted or has a non-nil selector => remove slices
+ if serviceDeleted || service.Spec.Selector != nil {
+ /*
+ Only remove if it is clearly labeled as managed by us:
+ we do not want to accidentally remove slices that are not
+ created by this controller.
+ */
if c.managedByController(eps) {
+ klog.Infof("Added for deletion EndpointSlice %s in namespace %s because service is deleted or has a selector",
+ eps.Name, eps.Namespace)
slicesToDelete = append(slicesToDelete, eps)
}
continue
}
+
+ // If the Service does not support this slice's AddressType => remove
if !serviceSupportedAddressesTypes.Has(eps.AddressType) {
- klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has an unsupported address type: %v", eps.Name, eps.Namespace, eps.AddressType)
+ klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has an unsupported address type: %v",
+ eps.Name, eps.Namespace, eps.AddressType)
slicesToDelete = append(slicesToDelete, eps)
continue
}
+
+ /*
+ Otherwise, this slice is potentially still valid for the given AddressType,
+ we'll send it to reconcileByAddressType for final merging and updates.
+ */
slicesByAddressType[eps.AddressType] = append(slicesByAddressType[eps.AddressType], eps)
}
- if !serviceDeleted {
- // Get tenant's endpoint slices for this service
+ /*
+ 4) If the Service was NOT deleted and has NO selector (i.e., it's a "no-selector" LB Service),
+ we proceed to handle creation and updates. That means:
+ - Gather Tenant's EndpointSlices
+ - Reconcile them by each AddressType
+ */
+ if !serviceDeleted && service.Spec.Selector == nil {
tenantEpSlices, err := c.getTenantEPSFromInfraService(ctx, service)
if err != nil {
return err
}
- // Reconcile the EndpointSlices for each address type e.g. ipv4, ipv6
+ // For each addressType (ipv4, ipv6, etc.) reconcile the infra slices
for addressType := range serviceSupportedAddressesTypes {
existingSlices := slicesByAddressType[addressType]
- err := c.reconcileByAddressType(service, tenantEpSlices, existingSlices, addressType)
- if err != nil {
+ if err := c.reconcileByAddressType(service, tenantEpSlices, existingSlices, addressType); err != nil {
return err
}
}
}
- // Delete the EndpointSlices that are no longer needed
+ /*
+ 5) Perform the actual deletion of all slices we flagged.
+ In many cases (serviceDeleted or .Spec.Selector != nil),
+ we end up with only "delete" actions and no new slice creation.
+ */
for _, eps := range slicesToDelete {
err := c.infraClient.DiscoveryV1().EndpointSlices(eps.Namespace).Delete(context.TODO(), eps.Name, metav1.DeleteOptions{})
if err != nil {
@@ -474,11 +538,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
// Create the desired port configuration
var desiredPorts []discovery.EndpointPort
- for _, port := range service.Spec.Ports {
+ for i := range service.Spec.Ports {
desiredPorts = append(desiredPorts, discovery.EndpointPort{
- Port: &port.TargetPort.IntVal,
- Protocol: &port.Protocol,
- Name: &port.Name,
+ Port: &service.Spec.Ports[i].TargetPort.IntVal,
+ Protocol: &service.Spec.Ports[i].Protocol,
+ Name: &service.Spec.Ports[i].Name,
})
}
@@ -588,55 +652,114 @@ func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
return false
}
-func (c *Controller) finalize(service *v1.Service, slicesToCreate []*discovery.EndpointSlice, slicesToUpdate []*discovery.EndpointSlice, slicesToDelete []*discovery.EndpointSlice) error {
- // If there are slices to delete and slices to create, make them as update
- for i := 0; i < len(slicesToDelete); {
+func (c *Controller) finalize(
+ service *v1.Service,
+ slicesToCreate []*discovery.EndpointSlice,
+ slicesToUpdate []*discovery.EndpointSlice,
+ slicesToDelete []*discovery.EndpointSlice,
+) error {
+ /*
+ We try to turn a "delete + create" pair into a single "update" operation
+ if the original slice (slicesToDelete[i]) has the same address type as
+ the first slice in slicesToCreate, and is owned by the same Service.
+
+ However, we must re-check the lengths of slicesToDelete and slicesToCreate
+ within the loop to avoid an out-of-bounds index in slicesToCreate.
+ */
+
+ i := 0
+ for i < len(slicesToDelete) {
+ // If there is nothing to create, break early
if len(slicesToCreate) == 0 {
break
}
- if slicesToDelete[i].AddressType == slicesToCreate[0].AddressType && ownedBy(slicesToDelete[i], service) {
- slicesToCreate[0].Name = slicesToDelete[i].Name
+
+ sd := slicesToDelete[i]
+ sc := slicesToCreate[0] // We can safely do this now, because len(slicesToCreate) > 0
+
+ // If the address type matches, and the slice is owned by the same Service,
+ // then instead of deleting sd and creating sc, we'll transform it into an update:
+ // we rename sc with sd's name, remove sd from the delete list, remove sc from the create list,
+ // and add sc to the update list.
+ if sd.AddressType == sc.AddressType && ownedBy(sd, service) {
+ sliceToUpdate := sc
+ sliceToUpdate.Name = sd.Name
+
+ // Remove the first element from slicesToCreate
slicesToCreate = slicesToCreate[1:]
- slicesToUpdate = append(slicesToUpdate, slicesToCreate[0])
+
+ // Remove the slice from slicesToDelete
slicesToDelete = append(slicesToDelete[:i], slicesToDelete[i+1:]...)
+
+ // Now add the renamed slice to the list of slices we want to update
+ slicesToUpdate = append(slicesToUpdate, sliceToUpdate)
+
+ /*
+ Do not increment i here, because we've just removed an element from
+ slicesToDelete. The next slice to examine is now at the same index i.
+ */
} else {
+ // If they don't match, move on to the next slice in slicesToDelete.
i++
}
}
- // Create the new slices if service is not marked for deletion
+ /*
+ If the Service is not being deleted, create all remaining slices in slicesToCreate.
+ (If the Service has a DeletionTimestamp, it means it is going away, so we do not
+ want to create new EndpointSlices.)
+ */
if service.DeletionTimestamp == nil {
for _, slice := range slicesToCreate {
- createdSlice, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Create(context.TODO(), slice, metav1.CreateOptions{})
+ createdSlice, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Create(
+ context.TODO(),
+ slice,
+ metav1.CreateOptions{},
+ )
if err != nil {
- klog.Errorf("Failed to create EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+ klog.Errorf("Failed to create EndpointSlice %s in namespace %s: %v",
+ slice.Name, slice.Namespace, err)
+ // If the namespace is terminating, it's safe to ignore the error.
if k8serrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
- return nil
+ continue
}
return err
}
- klog.Infof("Created EndpointSlice %s in namespace %s", createdSlice.Name, createdSlice.Namespace)
+ klog.Infof("Created EndpointSlice %s in namespace %s",
+ createdSlice.Name, createdSlice.Namespace)
}
}
- // Update slices
+ // Update slices that are in the slicesToUpdate list.
for _, slice := range slicesToUpdate {
- _, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Update(context.TODO(), slice, metav1.UpdateOptions{})
+ _, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Update(
+ context.TODO(),
+ slice,
+ metav1.UpdateOptions{},
+ )
if err != nil {
- klog.Errorf("Failed to update EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+ klog.Errorf("Failed to update EndpointSlice %s in namespace %s: %v",
+ slice.Name, slice.Namespace, err)
return err
}
- klog.Infof("Updated EndpointSlice %s in namespace %s", slice.Name, slice.Namespace)
+ klog.Infof("Updated EndpointSlice %s in namespace %s",
+ slice.Name, slice.Namespace)
}
- // Delete slices
+ // Finally, delete slices that are in slicesToDelete and are no longer needed.
for _, slice := range slicesToDelete {
- err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Delete(context.TODO(), slice.Name, metav1.DeleteOptions{})
+ err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Delete(
+ context.TODO(),
+ slice.Name,
+ metav1.DeleteOptions{},
+ )
if err != nil {
- klog.Errorf("Failed to delete EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+ klog.Errorf("Failed to delete EndpointSlice %s in namespace %s: %v",
+ slice.Name, slice.Namespace, err)
return err
}
- klog.Infof("Deleted EndpointSlice %s in namespace %s", slice.Name, slice.Namespace)
+ klog.Infof("Deleted EndpointSlice %s in namespace %s",
+ slice.Name, slice.Namespace)
}
return nil
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1fb86e25f..14d92d340 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/sets"
dfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/testing"
@@ -189,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
}: "VirtualMachineInstanceList",
})
- controller := NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, "test")
+ controller := NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, "test", "test-cluster")
err := controller.Init()
if err != nil {
@@ -686,5 +687,229 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
return false, err
}).Should(BeTrue(), "EndpointSlice in infra cluster should be recreated by the controller after deletion")
})
+
+ g.It("Should correctly handle multiple unique ports in EndpointSlice", func() {
+ // Create a VMI in the infra cluster
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+ // Create an EndpointSlice in the tenant cluster
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+ *createPort("http", 80, v1.ProtocolTCP),
+ []discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+
+ // Define multiple ports for the Service
+ servicePorts := []v1.ServicePort{
+ {
+ Name: "client",
+ Protocol: v1.ProtocolTCP,
+ Port: 10001,
+ TargetPort: intstr.FromInt(30396),
+ NodePort: 30396,
+ },
+ {
+ Name: "dashboard",
+ Protocol: v1.ProtocolTCP,
+ Port: 8265,
+ TargetPort: intstr.FromInt(31003),
+ NodePort: 31003,
+ },
+ {
+ Name: "metrics",
+ Protocol: v1.ProtocolTCP,
+ Port: 8080,
+ TargetPort: intstr.FromInt(30452),
+ NodePort: 30452,
+ },
+ }
+
+ createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
+ servicePorts[0], v1.ServiceExternalTrafficPolicyLocal)
+
+ svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
+ Expect(err).To(BeNil())
+
+ svc.Spec.Ports = servicePorts
+ _, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
+ Expect(err).To(BeNil())
+
+ var epsListMultiPort *discoveryv1.EndpointSliceList
+
+ Eventually(func() (bool, error) {
+ epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+ if len(epsListMultiPort.Items) != 1 {
+ return false, err
+ }
+
+ createdSlice := epsListMultiPort.Items[0]
+ expectedPortNames := []string{"client", "dashboard", "metrics"}
+ foundPortNames := []string{}
+
+ for _, port := range createdSlice.Ports {
+ if port.Name != nil {
+ foundPortNames = append(foundPortNames, *port.Name)
+ }
+ }
+
+ if len(foundPortNames) != len(expectedPortNames) {
+ return false, err
+ }
+
+ portSet := sets.NewString(foundPortNames...)
+ expectedPortSet := sets.NewString(expectedPortNames...)
+ return portSet.Equal(expectedPortSet), err
+ }).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
+ })
+
+ g.It("Should not panic when Service changes to have a non-nil selector, causing EndpointSlice deletion with no new slices to create", func() {
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+ *createPort("http", 80, v1.ProtocolTCP),
+ []discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+ createAndAssertInfraServiceLB("infra-service-no-selector", "tenant-service-name", "test-cluster",
+ v1.ServicePort{
+ Name: "web",
+ Port: 80,
+ NodePort: 31900,
+ Protocol: v1.ProtocolTCP,
+ TargetPort: intstr.IntOrString{IntVal: 30390},
+ },
+ v1.ServiceExternalTrafficPolicyLocal,
+ )
+
+ // Wait for the controller to create an EndpointSlice in the infra cluster.
+ var epsList *discoveryv1.EndpointSliceList
+ var err error
+ Eventually(func() (bool, error) {
+ epsList, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+ List(context.TODO(), metav1.ListOptions{})
+ if err != nil {
+ return false, err
+ }
+ // Wait exactly 1 slice
+ if len(epsList.Items) == 1 {
+ return true, nil
+ }
+ return false, nil
+ }).Should(BeTrue(), "Controller should create an EndpointSlice in infra cluster for the LB service")
+
+ svcWithSelector, err := testVals.infraClient.CoreV1().Services(infraNamespace).
+ Get(context.TODO(), "infra-service-no-selector", metav1.GetOptions{})
+ Expect(err).To(BeNil())
+
+ // Let's set any selector to run the slice deletion logic
+ svcWithSelector.Spec.Selector = map[string]string{"test": "selector-added"}
+ _, err = testVals.infraClient.CoreV1().Services(infraNamespace).
+ Update(context.TODO(), svcWithSelector, metav1.UpdateOptions{})
+ Expect(err).To(BeNil())
+
+ Eventually(func() (bool, error) {
+ epsList, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+ List(context.TODO(), metav1.ListOptions{})
+ if err != nil {
+ return false, err
+ }
+ // We expect that after the update service.EndpointSlice will become 0
+ if len(epsList.Items) == 0 {
+ return true, nil
+ }
+ return false, nil
+ }).Should(BeTrue(), "Existing EndpointSlice should be removed because Service now has a selector")
+ })
+
+ g.It("Should remove EndpointSlices and not recreate them when a previously no-selector Service obtains a selector", func() {
+ testVals.infraClient.Fake.PrependReactor("create", "endpointslices", func(action testing.Action) (bool, runtime.Object, error) {
+ createAction := action.(testing.CreateAction)
+ slice := createAction.GetObject().(*discoveryv1.EndpointSlice)
+ if slice.Name == "" && slice.GenerateName != "" {
+ slice.Name = slice.GenerateName + "-fake001"
+ }
+ return false, slice, nil
+ })
+
+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+ *createPort("http", 80, v1.ProtocolTCP),
+ []discoveryv1.Endpoint{
+ *createEndpoint("123.45.67.89", "worker-0-test", true, true, false),
+ },
+ )
+
+ noSelectorSvcName := "svc-without-selector"
+ svc := &v1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: noSelectorSvcName,
+ Namespace: infraNamespace,
+ Labels: map[string]string{
+ kubevirt.TenantServiceNameLabelKey: "tenant-service-name",
+ kubevirt.TenantServiceNamespaceLabelKey: tenantNamespace,
+ kubevirt.TenantClusterNameLabelKey: "test-cluster",
+ },
+ },
+ Spec: v1.ServiceSpec{
+ Ports: []v1.ServicePort{
+ {
+ Name: "web",
+ Port: 80,
+ NodePort: 31900,
+ Protocol: v1.ProtocolTCP,
+ TargetPort: intstr.IntOrString{IntVal: 30390},
+ },
+ },
+ Type: v1.ServiceTypeLoadBalancer,
+ ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
+ },
+ }
+
+ _, err := testVals.infraClient.CoreV1().Services(infraNamespace).Create(context.TODO(), svc, metav1.CreateOptions{})
+ Expect(err).To(BeNil())
+
+ Eventually(func() (bool, error) {
+ epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+ List(context.TODO(), metav1.ListOptions{})
+ if err != nil {
+ return false, err
+ }
+ return len(epsList.Items) == 1, nil
+ }).Should(BeTrue(), "Controller should create an EndpointSlice in infra cluster for the no-selector LB service")
+
+ svcWithSelector, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(
+ context.TODO(), noSelectorSvcName, metav1.GetOptions{})
+ Expect(err).To(BeNil())
+
+ svcWithSelector.Spec.Selector = map[string]string{"app": "test-value"}
+ _, err = testVals.infraClient.CoreV1().Services(infraNamespace).
+ Update(context.TODO(), svcWithSelector, metav1.UpdateOptions{})
+ Expect(err).To(BeNil())
+
+ Eventually(func() (bool, error) {
+ epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+ List(context.TODO(), metav1.ListOptions{})
+ if err != nil {
+ return false, err
+ }
+ return len(epsList.Items) == 0, nil
+ }).Should(BeTrue(), "All EndpointSlices should be removed after Service acquires a selector (no new slices created)")
+ })
+
+ g.It("Should ignore Services from a different cluster", func() {
+ // Create a Service with cluster label "other-cluster"
+ svc := createInfraServiceLB("infra-service-conflict", "tenant-service-name", "other-cluster",
+ v1.ServicePort{Name: "web", Port: 80, NodePort: 31900, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{IntVal: 30390}},
+ v1.ServiceExternalTrafficPolicyLocal)
+ _, err := testVals.infraClient.CoreV1().Services(infraNamespace).Create(context.TODO(), svc, metav1.CreateOptions{})
+ Expect(err).To(BeNil())
+
+ // The controller should ignore this Service, so no EndpointSlice should be created.
+ Eventually(func() (bool, error) {
+ epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+ if err != nil {
+ return false, err
+ }
+ // Expect zero slices since the cluster label does not match "test-cluster"
+ return len(epsList.Items) == 0, nil
+ }).Should(BeTrue(), "Services with a different cluster label should be ignored")
+ })
+
})
})

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20
ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.14.1@sha256:4b84a077e7f1b75bdf8b272c8f147e4ef3b67b9bea83383a399e9149868384ac

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:91ec9c31472f8e94ae5f6f5a2568058eb28b3f57ab7e203d8d4a0993911fffc3

View File

@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a resource request/limit object based on a given preset.
These presets are intended for basic testing and are not meant to be used in production.
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
"nano" (dict
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
)
"micro" (dict
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
)
"small" (dict
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
)
"medium" (dict
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
)
"large" (dict
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
)
"xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
)
"2xlarge" (dict
"requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
)
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}

View File

@@ -26,23 +26,10 @@ spec:
containers:
- image: "{{ $.Files.Get "images/cluster-autoscaler.tag" | trim }}"
name: cluster-autoscaler
resources:
limits:
cpu: 512m
memory: 512Mi
requests:
cpu: 125m
memory: 128Mi
command:
- /cluster-autoscaler
args:
- --cloud-provider=clusterapi
- --enforce-node-group-min-size=true
- --ignore-daemonsets-utilization=true
- --ignore-mirror-pods-utilization=true
- --scale-down-unneeded-time=30s
- --scan-interval=25s
- --force-delete-unregistered-nodes=true
- --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
- --clusterapi-cloud-config-authoritative
- --node-group-auto-discovery=clusterapi:namespace={{ .Release.Namespace }},clusterName={{ .Release.Name }}

View File

@@ -29,7 +29,6 @@ spec:
{{- range .group.roles }}
node-role.kubernetes.io/{{ . }}: ""
{{- end }}
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }}
spec:
domain:
{{- if and .group.resources .group.resources.cpu }}
@@ -102,37 +101,12 @@ metadata:
annotations:
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
spec:
apiServer:
{{- if .Values.kamajiControlPlane.apiServer.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.apiServer.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.apiServer.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
controllerManager:
{{- if .Values.kamajiControlPlane.controllerManager.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.controllerManager.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.controllerManager.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
scheduler:
{{- if .Values.kamajiControlPlane.scheduler.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.scheduler.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.scheduler.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
dataStoreName: "{{ $etcd }}"
addons:
coreDNS:
dnsServiceIPs:
- 10.95.0.10
konnectivity:
server:
port: 8132
{{- if .Values.kamajiControlPlane.addons.konnectivity.server.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.addons.konnectivity.server.resources | nindent 10 }}
{{- else if ne .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
{{- end }}
konnectivity: {}
kubelet:
cgroupfs: systemd
preferredAddressTypes:
@@ -143,7 +117,7 @@ spec:
ingress:
extraAnnotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
className: "{{ $ingress }}"
deployment:
podAdditionalMetadata:
@@ -152,21 +126,6 @@ spec:
replicas: 2
version: 1.30.1
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
spec:
replicas: 2
minReplicas: 1
kind: kubernetes
type: control-plane
selector:
kamaji.clastix.io/component: deployment
kamaji.clastix.io/name: {{ .Release.Name }}
version: {{ $.Chart.Version }}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtCluster
metadata:
@@ -213,7 +172,6 @@ spec:
---
{{- $context := deepCopy $ }}
{{- $_ := set $context "group" $group }}
{{- $_ := set $context "groupName" $groupName }}
{{- $kubevirtmachinetemplate := include "kubevirtmachinetemplate" $context }}
{{- $kubevirtmachinetemplateHash := $kubevirtmachinetemplate | sha256sum | trunc 6 }}
{{- $kubevirtmachinetemplateName := printf "%s-%s-%s" $.Release.Name $groupName $kubevirtmachinetemplateHash }}
@@ -275,7 +233,7 @@ spec:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
namespace: {{ $.Release.Namespace }}
namespace: default
version: v1.30.1
---
apiVersion: cluster.x-k8s.io/v1beta1
@@ -297,21 +255,6 @@ spec:
- type: Ready
status: "False"
timeout: 300s
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: {{ $.Release.Namespace }}
spec:
minReplicas: {{ $group.minReplicas }}
kind: kubernetes
type: worker
selector:
cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ $groupName }}
cluster.x-k8s.io/role: worker
version: {{ $.Chart.Version }}
{{- end }}
---
{{- /*

View File

@@ -63,21 +63,11 @@ spec:
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
resources:
limits:
cpu: 512m
memory: 512Mi
requests:
cpu: 125m
memory: 128Mi
memory: 50Mi
cpu: 10m
- name: csi-provisioner
image: quay.io/openshift/origin-csi-external-provisioner:latest
resources:
limits:
cpu: 512m
memory: 512Mi
requests:
cpu: 125m
memory: 128Mi
args:
- "--csi-address=$(ADDRESS)"
- "--default-fstype=ext4"
@@ -112,12 +102,9 @@ spec:
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
resources:
limits:
cpu: 512m
memory: 512Mi
requests:
cpu: 125m
memory: 128Mi
memory: 50Mi
cpu: 10m
- name: csi-liveness-probe
image: quay.io/openshift/origin-csi-livenessprobe:latest
args:
@@ -128,12 +115,9 @@ spec:
- name: socket-dir
mountPath: /csi
resources:
limits:
cpu: 512m
memory: 512Mi
requests:
cpu: 125m
memory: 128Mi
memory: 50Mi
cpu: 10m
volumes:
- name: socket-dir
emptyDir: {}

View File

@@ -24,13 +24,3 @@ rules:
resourceNames:
- {{ .Release.Name }}
verbs: ["get", "list", "watch"]
- apiGroups:
- cozystack.io
resources:
- workloadmonitors
resourceNames:
- {{ .Release.Name }}
{{- range $groupName, $group := .Values.nodeGroups }}
- {{ $.Release.Name }}-{{ $groupName }}
{{- end }}
verbs: ["get", "list", "watch"]

View File

@@ -18,8 +18,7 @@ spec:
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-cert-manager-crds
storageNamespace: cozy-cert-manager-crds
install:

View File

@@ -19,8 +19,7 @@ spec:
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-cert-manager
storageNamespace: cozy-cert-manager
install:

View File

@@ -18,8 +18,7 @@ spec:
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-cilium
storageNamespace: cozy-cilium
install:

Some files were not shown because too many files have changed in this diff.