mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-02-05 08:17:59 +00:00)

Compare commits: v0.20.2...upd-testin (239 commits)
.github/CODEOWNERS (vendored): 2 changed lines

@@ -1 +1 @@
-* @kvaps
+* @kvaps @lllamnyp
ADOPTERS.md

@@ -13,8 +13,8 @@ but it means a lot to us.
 To add your organization to this list, you can either:
 
-- [open a pull request](https://github.com/aenix-io/cozystack/pulls) to directly update this file, or
-- [edit this file](https://github.com/aenix-io/cozystack/blob/main/ADOPTERS.md) directly in GitHub
+- [open a pull request](https://github.com/cozystack/cozystack/pulls) to directly update this file, or
+- [edit this file](https://github.com/cozystack/cozystack/blob/main/ADOPTERS.md) directly in GitHub
 
 Feel free to ask in the Slack chat if you any questions and/or require
 assistance with updating this list.
CONTRIBUTING.md

@@ -23,7 +23,7 @@ We welcome many types of contributions including:
 * New features
 * Builds, CI/CD
 * Bug fixes
-* [Documentation](https://github.com/aenix-io/cozystack-website/tree/main)
+* [Documentation](https://github.com/cozystack/cozystack-website/tree/main)
 * Issue Triage
 * Answering questions on Slack or Github Discussions
 * Web design
GOVERNANCE.md (new file, 91 lines)

# Cozystack Governance

This document defines the governance structure of the Cozystack community, outlining how members collaborate to achieve shared goals.

## Overview

**Cozystack**, a Cloud Native Computing Foundation (CNCF) project, is committed
to building an open, inclusive, productive, and self-governing open source
community focused on building a high-quality open source PaaS and framework for building clouds.

## Code Repositories

The following code repositories are governed by the Cozystack community and
maintained under the `cozystack` namespace:

* **[Cozystack](https://github.com/cozystack/cozystack):** Main Cozystack codebase
* **[website](https://github.com/cozystack/website):** Cozystack website and documentation sources
* **[Talm](https://github.com/cozystack/talm):** Tool for managing Talos Linux the GitOps way
* **[cozy-proxy](https://github.com/cozystack/cozy-proxy):** A simple kube-proxy addon for 1:1 NAT services in Kubernetes with NFT backend
* **[cozystack-telemetry-server](https://github.com/cozystack/cozystack-telemetry-server):** Cozystack telemetry
* **[talos-bootstrap](https://github.com/cozystack/talos-bootstrap):** An interactive Talos Linux installer
* **[talos-meta-tool](https://github.com/cozystack/talos-meta-tool):** Tool for writing network metadata into META partition

## Community Roles

* **Users:** Members that engage with the Cozystack community via any medium, including Slack, Telegram, GitHub, and mailing lists.
* **Contributors:** Members contributing to the projects by contributing and reviewing code, writing documentation, responding to issues, participating in proposal discussions, and so on.
* **Directors:** Non-technical project leaders.
* **Maintainers:** Technical project leaders.

## Contributors

Cozystack is for everyone. Anyone can become a Cozystack contributor simply by
contributing to the project, whether through code, documentation, blog posts,
community management, or other means.
As with all Cozystack community members, contributors are expected to follow the
[Cozystack Code of Conduct](https://github.com/cozystack/cozystack/blob/main/CODE_OF_CONDUCT.md).

All contributions to Cozystack code, documentation, or other components in the
Cozystack GitHub organisation must follow the
[contributing guidelines](https://github.com/cozystack/cozystack/blob/main/CONTRIBUTING.md).
Whether these contributions are merged into the project is the prerogative of the maintainers.

## Directors

Directors are responsible for non-technical leadership functions within the project.
This includes representing Cozystack and its maintainers to the community, to the press,
and to the outside world; interfacing with CNCF and other governance entities;
and participating in project decision-making processes when appropriate.

Directors are elected by a majority vote of the maintainers.

## Maintainers

Maintainers have the right to merge code into the project.
Anyone can become a Cozystack maintainer (see "Becoming a Maintainer" below).

### Expectations

Cozystack maintainers are expected to:

* Review pull requests, triage issues, and fix bugs in their areas of
  expertise, ensuring that all changes go through the project's code review
  and integration processes.
* Monitor cncf-cozystack-* emails, the Cozystack Slack channels in Kubernetes
  and CNCF Slack workspaces, Telegram groups, and help out when possible.
* Rapidly respond to any time-sensitive security release processes.
* Attend Cozystack community meetings.

If a maintainer is no longer interested in or cannot perform the duties
listed above, they should move themselves to emeritus status.
If necessary, this can also occur through the decision-making process outlined below.

### Becoming a Maintainer

Anyone can become a Cozystack maintainer. Maintainers should be extremely
proficient in cloud native technologies and/or Go; have relevant domain expertise;
have the time and ability to meet the maintainer's expectations above;
and demonstrate the ability to work with the existing maintainers and project processes.

To become a maintainer, start by expressing interest to existing maintainers.
Existing maintainers will then ask you to demonstrate the qualifications above
by contributing PRs, doing code reviews, and other such tasks under their guidance.
After several months of working together, maintainers will decide whether to grant maintainer status.

## Project Decision-making Process

Ideally, all project decisions are resolved by consensus of maintainers and directors.
If this is not possible, a vote will be called.
The voting process is a simple majority in which each maintainer and director receives one vote.
MAINTAINERS.md

@@ -1,7 +1,12 @@
 # The Cozystack Maintainers
 
-| Maintainer | GitHub Username | Company |
-| ---------- | --------------- | ------- |
-| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix |
-| George Gaál | [@gecube](https://github.com/gecube) | Ænix |
-| Eduard Generalov | [@egeneralov](https://github.com/egeneralov) | Ænix |
+| Maintainer | GitHub Username | Company | Responsibility |
+| ---------- | --------------- | ------- | --------------------------------- |
+| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix | Core Maintainer |
+| George Gaál | [@gecube](https://github.com/gecube) | Ænix | DevOps Practices in Platform, Developers Advocate |
+| Kingdon Barrett | [@kingdonb](https://github.com/kingdonb) | Urmanac | FluxCD and flux-operator |
+| Timofei Larkin | [@lllamnyp](https://github.com/lllamnyp) | 3commas | Etcd-operator Lead |
+| Artem Bortnikov | [@aobort](https://github.com/aobort) | Timescale | Etcd-operator Lead |
+| Andrei Gumilev | [@chumkaska](https://github.com/chumkaska) | Ænix | Platform Documentation |
+| Timur Tukaev | [@tym83](https://github.com/tym83) | Ænix | Cozystack Website, Marketing, Community Management |
+| Kirill Klinchenkov | [@klinch0](https://github.com/klinch0) | Ænix | Core Maintainer |
Makefile: 14 changed lines

@@ -6,9 +6,12 @@ build:
 	make -C packages/apps/mysql image
 	make -C packages/apps/clickhouse image
 	make -C packages/apps/kubernetes image
+	make -C packages/extra/monitoring image
 	make -C packages/system/cozystack-api image
+	make -C packages/system/cozystack-controller image
 	make -C packages/system/cilium image
 	make -C packages/system/kubeovn image
+	make -C packages/system/kubeovn-webhook image
 	make -C packages/system/dashboard image
 	make -C packages/system/kamaji image
 	make -C packages/system/bucket image
@@ -17,8 +20,8 @@ build:
 	make manifests
 
 manifests:
-	(cd packages/core/installer/; helm template -n cozy-installer installer .) > manifests/cozystack-installer.yaml
-	sed -i 's|@sha256:[^"]\+||' manifests/cozystack-installer.yaml
+	mkdir -p _out/assets
+	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
 
 repos:
 	rm -rf _out
@@ -34,6 +37,13 @@ assets:
 	make -C packages/core/installer/ assets
 
 test:
+	test -f _out/assets/nocloud-amd64.raw.xz || make -C packages/core/installer talos-nocloud
 	make -C packages/core/testing apply
 	make -C packages/core/testing test
 	make -C packages/core/testing test-applications
+
+generate:
+	hack/update-codegen.sh
+
+upload_assets: assets
+	hack/upload-assets.sh
README.md: 24 changed lines

@@ -2,11 +2,11 @@
 
 
 [](https://opensource.org/)
 [](https://opensource.org/licenses/)
-[](https://aenix.io/contact-us/#meet)
-[](https://aenix.io/cozystack/)
-[](https://github.com/aenix-io/cozystack)
-[](https://github.com/aenix-io/cozystack)
+[](https://cozystack.io/support/)
+[](https://github.com/cozystack/cozystack)
+[](https://github.com/cozystack/cozystack/releases/latest)
+[](https://github.com/cozystack/cozystack/graphs/contributors)
 
 # Cozystack
 
@@ -42,21 +42,21 @@ If you encounter any difficulties, start with the [troubleshooting guide](https:
 ## Versioning
 
 Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
-A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.
+A full list of the available releases is available in the GitHub repository's [Release](https://github.com/cozystack/cozystack/releases) section.
 
-- [Roadmap](https://github.com/orgs/aenix-io/projects/2)
+- [Roadmap](https://cozystack.io/docs/roadmap/)
 
 ## Contributions
 
 Contributions are highly appreciated and very welcomed!
 
-In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/aenix-io/cozystack/issues) section.
+In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
 In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.
 
 You can express your intention in working on the fix on your own.
 Commits are used to generate the changelog, and their author will be referenced in it.
 
-In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/aenix-io/cozystack/discussions/categories/feature-requests).
+In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
 
 You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
 
@@ -67,8 +67,4 @@ The code is provided as-is with no warranties.
 
 ## Commercial Support
 
-[**Ænix**](https://aenix.io) offers enterprise-grade support, available 24/7.
-
-We provide all types of assistance, including consultations, development of missing features, design, assistance with installation, and integration.
-
-[Contact us](https://aenix.io/contact/)
+A list of companies providing commercial support for this project can be found on [official site](https://cozystack.io/support/).
kube-openapi API rule violation exceptions list

@@ -1,4 +1,4 @@
-API rule violation: list_type_missing,github.com/aenix.io/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
+API rule violation: list_type_missing,github.com/cozystack/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource
api/v1alpha1/groupversion_info.go (new file, 36 lines)

/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1 contains API Schema definitions for the v1alpha1 API group.
// +kubebuilder:object:generate=true
// +groupName=cozystack.io
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "cozystack.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
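The exported AddToScheme is the hook consumers use to teach a runtime.Scheme about this group. A minimal sketch of that wiring, assuming a consumer outside this repository; the Workload type it resolves is defined in the next file of this compare, and apiutil is part of controller-runtime, which the go.mod changes below add as a direct dependency:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

func main() {
	// Register core Kubernetes types plus the cozystack.io/v1alpha1 group.
	scheme := runtime.NewScheme()
	if err := clientgoscheme.AddToScheme(scheme); err != nil {
		panic(err)
	}
	if err := cozyv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now map registered Go types back to their GroupVersionKind.
	gvk, err := apiutil.GVKForObject(&cozyv1alpha1.Workload{}, scheme)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk) // cozystack.io/v1alpha1, Kind=Workload
}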
api/v1alpha1/workload_types.go (new file, 70 lines)

/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WorkloadStatus defines the observed state of Workload
type WorkloadStatus struct {
	// Kind represents the type of workload (redis, postgres, etc.)
	// +required
	Kind string `json:"kind"`

	// Type represents the specific role of the workload (redis, sentinel, etc.)
	// If not specified, defaults to Kind
	// +optional
	Type string `json:"type,omitempty"`

	// Resources specifies the compute resources allocated to this workload
	// +required
	Resources map[string]resource.Quantity `json:"resources"`

	// Operational indicates if all pods of the workload are ready
	// +optional
	Operational bool `json:"operational"`
}

// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".status.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".status.type"
// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".status.resources.cpu"
// +kubebuilder:printcolumn:name="Memory",type="string",JSONPath=".status.resources.memory"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=`.status.operational`

// Workload is the Schema for the workloads API
type Workload struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Status WorkloadStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// WorkloadList contains a list of Workload
type WorkloadList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Workload `json:"items"`
}

func init() {
	SchemeBuilder.Register(&Workload{}, &WorkloadList{})
}
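Workload has no spec: it is a status-only object that the controller publishes. An illustrative sketch of populating one; the names, namespace, and quantities here are invented for the example:

package v1alpha1_test

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// exampleWorkload builds a Workload the way a controller might report one.
func exampleWorkload() *cozyv1alpha1.Workload {
	return &cozyv1alpha1.Workload{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "redis-example-0", // hypothetical name
			Namespace: "tenant-example",  // hypothetical namespace
		},
		Status: cozyv1alpha1.WorkloadStatus{
			Kind: "redis",
			Type: "sentinel", // specific role; defaults to Kind when omitted
			Resources: map[string]resource.Quantity{
				"cpu":    resource.MustParse("500m"),
				"memory": resource.MustParse("512Mi"),
			},
			Operational: true, // all pods of the workload are ready
		},
	}
}

The printcolumn markers above read .status.resources.cpu and .status.resources.memory, which is why "cpu" and "memory" are the map keys that surface in kubectl output.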
api/v1alpha1/workloadmonitor_types.go (new file, 91 lines)

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WorkloadMonitorSpec defines the desired state of WorkloadMonitor
type WorkloadMonitorSpec struct {
	// Selector is a label selector to find workloads to monitor
	// +required
	Selector map[string]string `json:"selector"`

	// Kind specifies the kind of the workload
	// +optional
	Kind string `json:"kind,omitempty"`

	// Type specifies the type of the workload
	// +optional
	Type string `json:"type,omitempty"`

	// Version specifies the version of the workload
	// +optional
	Version string `json:"version,omitempty"`

	// MinReplicas specifies the minimum number of replicas that should be available
	// +kubebuilder:validation:Minimum=0
	// +optional
	MinReplicas *int32 `json:"minReplicas,omitempty"`

	// Replicas is the desired number of replicas
	// If not specified, will use observedReplicas as the target
	// +kubebuilder:validation:Minimum=0
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`
}

// WorkloadMonitorStatus defines the observed state of WorkloadMonitor
type WorkloadMonitorStatus struct {
	// Operational indicates if the workload meets all operational requirements
	// +optional
	Operational *bool `json:"operational,omitempty"`

	// AvailableReplicas is the number of ready replicas
	// +optional
	AvailableReplicas int32 `json:"availableReplicas"`

	// ObservedReplicas is the total number of pods observed
	// +optional
	ObservedReplicas int32 `json:"observedReplicas"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".spec.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas"
// +kubebuilder:printcolumn:name="MinReplicas",type="integer",JSONPath=".spec.minReplicas"
// +kubebuilder:printcolumn:name="Available",type="integer",JSONPath=".status.availableReplicas"
// +kubebuilder:printcolumn:name="Observed",type="integer",JSONPath=".status.observedReplicas"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=".status.operational"

// WorkloadMonitor is the Schema for the workloadmonitors API
type WorkloadMonitor struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WorkloadMonitorSpec   `json:"spec,omitempty"`
	Status WorkloadMonitorStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// WorkloadMonitorList contains a list of WorkloadMonitor
type WorkloadMonitorList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []WorkloadMonitor `json:"items"`
}

func init() {
	SchemeBuilder.Register(&WorkloadMonitor{}, &WorkloadMonitorList{})
}

// GetSelector returns the label selector from metadata
func (w *WorkloadMonitor) GetSelector() map[string]string {
	return w.Spec.Selector
}

// Selector specifies the label selector for workloads
type Selector map[string]string
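A sketch of a WorkloadMonitor as a user or chart might create one, assuming the ptr helper from k8s.io/utils (already in the module's dependency set); the labels and values are invented for the example:

package v1alpha1_test

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// exampleMonitor watches pods labeled app=redis-example and requires
// at least two of them to be ready for the workload to count as operational.
func exampleMonitor() *cozyv1alpha1.WorkloadMonitor {
	return &cozyv1alpha1.WorkloadMonitor{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "redis-example", // hypothetical name
			Namespace: "tenant-example",
		},
		Spec: cozyv1alpha1.WorkloadMonitorSpec{
			Selector:    map[string]string{"app": "redis-example"},
			Kind:        "redis",
			Version:     "7.2", // hypothetical version string
			MinReplicas: ptr.To(int32(2)),
		},
	}
}

Leaving Replicas unset falls back to observedReplicas as the target, per the field's own comment.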
api/v1alpha1/zz_generated.deepcopy.go (new file, 238 lines)

//go:build !ignore_autogenerated

/*
Copyright 2025 The Cozystack Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/api/resource"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Selector) DeepCopyInto(out *Selector) {
	{
		in := &in
		*out = make(Selector, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector.
func (in Selector) DeepCopy() Selector {
	if in == nil {
		return nil
	}
	out := new(Selector)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Workload) DeepCopyInto(out *Workload) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload.
func (in *Workload) DeepCopy() *Workload {
	if in == nil {
		return nil
	}
	out := new(Workload)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Workload) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadList) DeepCopyInto(out *WorkloadList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Workload, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadList.
func (in *WorkloadList) DeepCopy() *WorkloadList {
	if in == nil {
		return nil
	}
	out := new(WorkloadList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitor) DeepCopyInto(out *WorkloadMonitor) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitor.
func (in *WorkloadMonitor) DeepCopy() *WorkloadMonitor {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitor)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitor) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorList) DeepCopyInto(out *WorkloadMonitorList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]WorkloadMonitor, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorList.
func (in *WorkloadMonitorList) DeepCopy() *WorkloadMonitorList {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitorList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitorList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorSpec) DeepCopyInto(out *WorkloadMonitorSpec) {
	*out = *in
	if in.Selector != nil {
		in, out := &in.Selector, &out.Selector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.MinReplicas != nil {
		in, out := &in.MinReplicas, &out.MinReplicas
		*out = new(int32)
		**out = **in
	}
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorSpec.
func (in *WorkloadMonitorSpec) DeepCopy() *WorkloadMonitorSpec {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitorSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorStatus) DeepCopyInto(out *WorkloadMonitorStatus) {
	*out = *in
	if in.Operational != nil {
		in, out := &in.Operational, &out.Operational
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorStatus.
func (in *WorkloadMonitorStatus) DeepCopy() *WorkloadMonitorStatus {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitorStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadStatus) DeepCopyInto(out *WorkloadStatus) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make(map[string]resource.Quantity, len(*in))
		for key, val := range *in {
			(*out)[key] = val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadStatus.
func (in *WorkloadStatus) DeepCopy() *WorkloadStatus {
	if in == nil {
		return nil
	}
	out := new(WorkloadStatus)
	in.DeepCopyInto(out)
	return out
}
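These generated helpers give value semantics to reference-typed fields. A small sketch of what that buys, using the types above:

package v1alpha1_test

import (
	"fmt"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

func ExampleDeepCopy() {
	orig := &cozyv1alpha1.WorkloadMonitor{
		Spec: cozyv1alpha1.WorkloadMonitorSpec{
			Selector: map[string]string{"app": "redis"},
		},
	}

	// DeepCopy duplicates the selector map instead of aliasing it,
	// so mutating the copy leaves the original untouched.
	cp := orig.DeepCopy()
	cp.Spec.Selector["app"] = "changed"

	fmt.Println(orig.Spec.Selector["app"])
	// Output: redis
}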
cmd main.go (API server entrypoint)

@@ -19,7 +19,7 @@ package main
 import (
 	"os"
 
-	"github.com/aenix.io/cozystack/pkg/cmd/server"
+	"github.com/cozystack/cozystack/pkg/cmd/server"
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/component-base/cli"
 )
cmd/cozystack-assets-server/main.go (new file, 29 lines)

package main

import (
	"flag"
	"log"
	"net/http"
	"path/filepath"
)

func main() {
	addr := flag.String("address", ":8123", "Address to listen on")
	dir := flag.String("dir", "/cozystack/assets", "Directory to serve files from")
	flag.Parse()

	absDir, err := filepath.Abs(*dir)
	if err != nil {
		log.Fatalf("Error getting absolute path for %s: %v", *dir, err)
	}

	fs := http.FileServer(http.Dir(absDir))
	http.Handle("/", fs)

	log.Printf("Server starting on %s, serving directory %s", *addr, absDir)

	err = http.ListenAndServe(*addr, nil)
	if err != nil {
		log.Fatalf("Server failed to start: %v", err)
	}
}
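A sketch of fetching one asset from this server, assuming it is reachable locally on its default :8123 and that the served directory holds the cozystack-installer.yaml produced by the Makefile's manifests target:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// The path is relative to the server's -dir; the file name here is the
	// manifest the Makefile writes into _out/assets.
	resp, err := http.Get("http://localhost:8123/cozystack-installer.yaml")
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status: %s", resp.Status)
	}

	n, err := io.Copy(os.Stdout, resp.Body)
	if err != nil {
		log.Fatalf("copy failed: %v", err)
	}
	fmt.Fprintf(os.Stderr, "read %d bytes\n", n)
}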
cmd/cozystack-controller/main.go (new file, 210 lines)

/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"crypto/tls"
	"flag"
	"os"
	"time"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/webhook"

	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	"github.com/cozystack/cozystack/internal/controller"
	"github.com/cozystack/cozystack/internal/telemetry"
	// +kubebuilder:scaffold:imports
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
)

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
	// +kubebuilder:scaffold:scheme
}

func main() {
	var metricsAddr string
	var enableLeaderElection bool
	var probeAddr string
	var secureMetrics bool
	var enableHTTP2 bool
	var disableTelemetry bool
	var telemetryEndpoint string
	var telemetryInterval string
	var cozystackVersion string
	var tlsOpts []func(*tls.Config)
	flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
		"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	flag.BoolVar(&secureMetrics, "metrics-secure", true,
		"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
	flag.BoolVar(&enableHTTP2, "enable-http2", false,
		"If set, HTTP/2 will be enabled for the metrics and webhook servers")
	flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
		"Disable telemetry collection")
	flag.StringVar(&telemetryEndpoint, "telemetry-endpoint", "https://telemetry.cozystack.io",
		"Endpoint for sending telemetry data")
	flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
		"Interval between telemetry data collection (e.g. 15m, 1h)")
	flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
		"Version of Cozystack")
	opts := zap.Options{
		Development: false,
	}
	opts.BindFlags(flag.CommandLine)
	flag.Parse()

	// Parse telemetry interval
	interval, err := time.ParseDuration(telemetryInterval)
	if err != nil {
		setupLog.Error(err, "invalid telemetry interval")
		os.Exit(1)
	}

	// Configure telemetry
	telemetryConfig := telemetry.Config{
		Disabled:         disableTelemetry,
		Endpoint:         telemetryEndpoint,
		Interval:         interval,
		CozystackVersion: cozystackVersion,
	}

	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

	// if the enable-http2 flag is false (the default), http/2 should be disabled
	// due to its vulnerabilities. More specifically, disabling http/2 will
	// prevent from being vulnerable to the HTTP/2 Stream Cancellation and
	// Rapid Reset CVEs. For more information see:
	// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
	// - https://github.com/advisories/GHSA-4374-p667-p6c8
	disableHTTP2 := func(c *tls.Config) {
		setupLog.Info("disabling http/2")
		c.NextProtos = []string{"http/1.1"}
	}

	if !enableHTTP2 {
		tlsOpts = append(tlsOpts, disableHTTP2)
	}

	webhookServer := webhook.NewServer(webhook.Options{
		TLSOpts: tlsOpts,
	})

	// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
	// More info:
	// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
	// - https://book.kubebuilder.io/reference/metrics.html
	metricsServerOptions := metricsserver.Options{
		BindAddress:   metricsAddr,
		SecureServing: secureMetrics,
		TLSOpts:       tlsOpts,
	}

	if secureMetrics {
		// FilterProvider is used to protect the metrics endpoint with authn/authz.
		// These configurations ensure that only authorized users and service accounts
		// can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
		// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
		metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization

		// TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
		// generate self-signed certificates for the metrics server. While convenient for development and testing,
		// this setup is not recommended for production.
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:                 scheme,
		Metrics:                metricsServerOptions,
		WebhookServer:          webhookServer,
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "19a0338c.cozystack.io",
		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
		// when the Manager ends. This requires the binary to immediately end when the
		// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
		// speeds up voluntary leader transitions as the new leader don't have to wait
		// LeaseDuration time first.
		//
		// In the default scaffold provided, the program ends immediately after
		// the manager stops, so would be fine to enable this option. However,
		// if you are doing or is intended to do any operation such as perform cleanups
		// after the manager stops then its usage might be unsafe.
		// LeaderElectionReleaseOnCancel: true,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	if err = (&controller.WorkloadMonitorReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
		os.Exit(1)
	}
	// +kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	// Initialize telemetry collector
	collector, err := telemetry.NewCollector(mgr.GetClient(), &telemetryConfig, mgr.GetConfig())
	if err != nil {
		setupLog.V(1).Error(err, "unable to create telemetry collector, telemetry will be disabled")
	}

	if collector != nil {
		if err := mgr.Add(collector); err != nil {
			setupLog.V(1).Error(err, "unable to set up telemetry collector, continuing without telemetry")
		}
	}

	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}
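The WorkloadMonitorReconciler registered above lives in internal/controller, which this compare view does not show. For orientation, a minimal controller-runtime skeleton with the same registration shape; the reconcile body is a placeholder, not the project's actual logic:

package controller

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadMonitorReconciler reconciles WorkloadMonitor objects.
// The field names match how main.go constructs it (Client, Scheme).
type WorkloadMonitorReconciler struct {
	Client client.Client
	Scheme *runtime.Scheme
}

func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var mon cozyv1alpha1.WorkloadMonitor
	if err := r.Client.Get(ctx, req.NamespacedName, &mon); err != nil {
		// The object may already be gone; nothing to do then.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Placeholder: the real controller presumably lists pods matching
	// mon.Spec.Selector and updates mon.Status (availableReplicas,
	// observedReplicas, operational) through the status subresource.
	return ctrl.Result{}, nil
}

// SetupWithManager wires the reconciler into the manager, as called from main().
func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&cozyv1alpha1.WorkloadMonitor{}).
		Complete(r)
}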
New dashboard JSON files (each diff suppressed in the compare view as too large):

* dashboards/clickhouse/altinity-clickhouse-operator-dashboard.json (5407 lines)
* dashboards/control-plane/kube-etcd.json (3611 lines)
* one further suppressed diff whose file name is not shown
* dashboards/flux/flux-control-plane.json (1725 lines)
* dashboards/flux/flux-stats.json (1391 lines)
* dashboards/goldpinger/goldpinger.json (1219 lines)
* dashboards/kafka/strimzi-kafka.json (2940 lines)
* dashboards/kubevirt/kubevirt-control-plane.json (5196 lines)
* dashboards/storage/linstor.json (2193 lines)
go.mod: 24 changed lines

@@ -1,22 +1,27 @@
 // This is a generated file. Do not edit directly.
 
-module github.com/aenix.io/cozystack
+module github.com/cozystack/cozystack
 
 go 1.23.0
 
 require (
-	github.com/emicklei/go-restful/v3 v3.11.0
+	github.com/fluxcd/helm-controller/api v1.1.0
 	github.com/google/gofuzz v1.2.0
+	github.com/onsi/ginkgo/v2 v2.19.0
+	github.com/onsi/gomega v1.33.1
 	github.com/spf13/cobra v1.8.1
 	github.com/stretchr/testify v1.9.0
+	gopkg.in/yaml.v2 v2.4.0
+	k8s.io/api v0.31.2
 	k8s.io/apiextensions-apiserver v0.31.2
 	k8s.io/apimachinery v0.31.2
 	k8s.io/apiserver v0.31.2
 	k8s.io/client-go v0.31.2
-	k8s.io/code-generator v0.31.2
 	k8s.io/component-base v0.31.2
+	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+	sigs.k8s.io/controller-runtime v0.19.0
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1
 )
 
@@ -31,23 +36,27 @@ require (
 	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fluxcd/helm-controller/api v1.1.0 // indirect
 	github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
 	github.com/fluxcd/pkg/apis/meta v1.6.1 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-logr/zapr v1.3.0 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/cel-go v0.21.0 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
@@ -84,7 +93,6 @@ require (
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.28.0 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/mod v0.21.0 // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
 	golang.org/x/sync v0.8.0 // indirect
@@ -93,6 +101,7 @@ require (
 	golang.org/x/text v0.19.0 // indirect
 	golang.org/x/time v0.7.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
+	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
 	google.golang.org/grpc v1.65.0 // indirect
@@ -100,14 +109,9 @@ require (
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
-	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.31.2 // indirect
-	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
-	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kms v0.31.2 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
-	sigs.k8s.io/controller-runtime v0.19.0 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
go.sum | 12
@@ -26,6 +26,10 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fluxcd/helm-controller/api v1.1.0 h1:NS5Wm3U6Kv4w7Cw2sDOV++vf2ecGfFV00x1+2Y3QcOY=
@@ -214,8 +218,6 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -252,6 +254,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
 google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
 google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
 google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
@@ -287,12 +291,8 @@ k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
 k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
 k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
 k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
-k8s.io/code-generator v0.31.2 h1:xLWxG0HEpMSHfcM//3u3Ro2Hmc6AyyLINQS//Z2GEOI=
-k8s.io/code-generator v0.31.2/go.mod h1:eEQHXgBU/m7LDaToDoiz3t97dUUVyOblQdwOr8rivqc=
 k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA=
 k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ=
-k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
-k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kms v0.31.2 h1:pyx7l2qVOkClzFMIWMVF/FxsSkgd+OIGH7DecpbscJI=

@@ -1,5 +1,5 @@
 /*
-Copyright 2024 The Cozystack Authors.
+Copyright 2025 The Cozystack Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -21,7 +21,7 @@ fix_d8() {
 }

 swap_pvc_overview() {
 jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
 }

 deprectaed_remove_faq() {
@@ -68,7 +68,7 @@ modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/namespace/
 modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhost_detail.json
 modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhosts.json
 modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/control-plane-status.json
-modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd3.json #TODO
+modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd.json #TODO
 modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/deprecated-resources.json
 modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/ntp.json #TODO
 modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/nodes.json
@@ -78,6 +78,10 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/pod.json
 modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespaces.json
 modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespace.json
 modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/capacity-planning/capacity-planning.json
+modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-control-plane.json
+modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats.json
+modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
+modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
 EOT


@@ -109,4 +113,3 @@ done <<\EOT
 https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
 https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
 EOT
-

hack/e2e.sh | 11
@@ -60,7 +60,7 @@ done

 # Prepare system drive
 if [ ! -f nocloud-amd64.raw ]; then
-wget https://github.com/aenix-io/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
+wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
 rm -f nocloud-amd64.raw
 xz --decompress nocloud-amd64.raw.xz
 fi
@@ -113,8 +113,6 @@ machine:
 - usermode_helper=disabled
 - name: zfs
 - name: spl
-install:
-image: ghcr.io/aenix-io/cozystack/talos:v1.8.3
 files:
 - content: |
 [plugins]
@@ -142,6 +140,9 @@ EOT

 cat > patch-controlplane.yaml <<\EOT
 machine:
+nodeLabels:
+node.kubernetes.io/exclude-from-external-load-balancers:
+$patch: delete
 network:
 interfaces:
 - interface: eth0
@@ -228,6 +229,7 @@ sleep 5
 kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

 # Wait for Cluster-API providers
+timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
 kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

 # Wait for linstor controller
@@ -296,6 +298,9 @@ spec:
 avoidBuggyIPs: false
 EOT

+# Wait for cozystack-api
+kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
+
 kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
 "host": "example.org",
 "ingress": true,

@@ -1,12 +1,13 @@
 #!/bin/sh
 set -e

 file=versions_map

 charts=$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")')

-# <chart> <version> <commit>
 new_map=$(
 for chart in $charts; do
-awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' $chart/Chart.yaml
+awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' "$chart/Chart.yaml"
 done
 )

@@ -15,47 +16,48 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
 exit 0
 fi

-miss_map=$(echo "$new_map" | awk 'NR==FNR { new_map[$1 " " $2] = $3; next } { if (!($1 " " $2 in new_map)) print $1, $2, $3}' - $file)
+miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")

+# search accross all tags sorted by version
+search_commits=$(git ls-remote --tags origin | grep 'refs/tags/v' | sort -k2,2 -rV | awk '{print $1}')
+# add latest main commit to search
+search_commits="${search_commits} $(git rev-parse "origin/main")"

 resolved_miss_map=$(
-echo "$miss_map" | while read chart version commit; do
+echo "$miss_map" | while read -r chart version commit; do
-if [ "$commit" = HEAD ]; then
+# if version is found in HEAD, it's HEAD
-line=$(awk '/^version:/ {print NR; exit}' "./$chart/Chart.yaml")
+if grep -q "^version: $version$" ./${chart}/Chart.yaml; then
-change_commit=$(git --no-pager blame -L"$line",+1 -- "$chart/Chart.yaml" | awk '{print $1}')
+echo "$chart $version HEAD"
+continue
-if [ "$change_commit" = "00000000" ]; then
+fi
-# Not committed yet, use previous commit
-line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
+# if commit is not HEAD, check if it's valid
-commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
+if [ $commit != "HEAD" ]; then
-if [ $(echo $commit | cut -c1) = "^" ]; then
+if ! git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
-# Previous commit not exists
+echo "Commit $commit for $chart $version is not valid" >&2
-commit=$(echo $commit | cut -c2-)
+exit 1
-fi
-else
-# Committed, but version_map wasn't updated
-line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
-change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
-if [ $(echo $change_commit | cut -c1) = "^" ]; then
-# Previous commit not exists
-commit=$(echo $change_commit | cut -c2-)
-else
-commit=$(git describe --always "$change_commit~1")
-fi
 fi

-# Check if the commit belongs to the main branch
+commit=$(git rev-parse --short "$commit")
-if ! git merge-base --is-ancestor "$commit" main; then
+echo "$chart $version $commit"
-# Find the closest parent commit that belongs to main
+continue
-commit_in_main=$(git log --pretty=format:"%h" main -- "$chart" | head -n 1)
-if [ -n "$commit_in_main" ]; then
-commit="$commit_in_main"
-else
-# No valid commit found in main branch for $chart, skipping..."
-continue
-fi
-fi
 fi
-echo "$chart $version $commit"
+# if commit is HEAD, but version is not found in HEAD, check all tags
+found_tag=""
+for tag in $search_commits; do
+if git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
+found_tag=$(git rev-parse --short "${tag}")
+break
+fi
+done
+
+if [ -z "$found_tag" ]; then
+echo "Can't find $chart $version in any version tag or in the latest main commit" >&2
+exit 1
+fi
+
+echo "$chart $version $found_tag"
 done
 )

@@ -22,6 +22,7 @@ SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
 API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${SCRIPT_ROOT}/api/api-rules"}"
 UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS:-true}"
+CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"

 source "${CODEGEN_PKG}/kube_codegen.sh"

@@ -46,3 +47,6 @@ kube::codegen::gen_openapi \
 ${update_report:+"${update_report}"} \
 --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
 "${SCRIPT_ROOT}/pkg/apis"
+
+$CONTROLLER_GEN object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
+$CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:artifacts:config=packages/system/cozystack-controller/templates/crds

hack/upload-assets.sh | 8 (new executable file)
@@ -0,0 +1,8 @@
#!/bin/bash
set -xe

version=$(git describe --tags)
gh release upload $version _out/assets/cozystack-installer.yaml
gh release upload $version _out/assets/metal-amd64.iso
gh release upload $version _out/assets/metal-amd64.raw.xz
gh release upload $version _out/assets/nocloud-amd64.raw.xz

internal/controller/suite_test.go | 96 (new file)
@@ -0,0 +1,96 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"fmt"
	"path/filepath"
	"runtime"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	// +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc

func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	ctx, cancel = context.WithCancel(context.TODO())

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,

		// The BinaryAssetsDirectory is only required if you want to run the tests directly
		// without call the makefile target test. If not informed it will look for the
		// default path defined in controller-runtime which is /usr/local/kubebuilder/.
		// Note that you must have the required binaries setup under the bin directory to perform
		// the tests directly. When we run make test it will be setup and used automatically.
		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
			fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = cozystackiov1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	// +kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

})

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	cancel()
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})

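The suite above only bootstraps envtest; no specs ship in this diff. As a rough illustration, a spec registered in the same package could exercise the client along the lines of the sketch below. The WorkloadMonitorSpec field names are inferred from the controller that follows and may not match the actual v1alpha1 API exactly, and the metav1 and types imports would need to be added to the suite file.

var _ = Describe("WorkloadMonitor", func() {
	It("can be created and fetched via the envtest client", func() {
		// Hypothetical object; field names inferred from the controller below.
		monitor := &cozystackiov1alpha1.WorkloadMonitor{
			ObjectMeta: metav1.ObjectMeta{Name: "sample", Namespace: "default"},
			Spec: cozystackiov1alpha1.WorkloadMonitorSpec{
				Kind:     "redis",
				Type:     "cache",
				Selector: map[string]string{"app": "sample"},
			},
		}
		Expect(k8sClient.Create(ctx, monitor)).To(Succeed())

		// Read the object back through the API server started by envtest.
		fetched := &cozystackiov1alpha1.WorkloadMonitor{}
		key := types.NamespacedName{Name: "sample", Namespace: "default"}
		Expect(k8sClient.Get(ctx, key, fetched)).To(Succeed())
		Expect(fetched.Spec.Kind).To(Equal("redis"))
	})
})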
internal/controller/workloadmonitor_controller.go | 273 (new file)
@@ -0,0 +1,273 @@
package controller

import (
	"context"
	"encoding/json"
	"sort"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadMonitorReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch

// isPodReady checks if the Pod is in the Ready condition.
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

// updateOwnerReferences adds the given monitor as a new owner reference to the object if not already present.
// It then sorts the owner references to enforce a consistent order.
func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
	// Retrieve current owner references
	owners := obj.GetOwnerReferences()

	// Check if current monitor is already in owner references
	var alreadyOwned bool
	for _, ownerRef := range owners {
		if ownerRef.UID == monitor.GetUID() {
			alreadyOwned = true
			break
		}
	}

	runtimeObj, ok := monitor.(runtime.Object)
	if !ok {
		return
	}
	gvk := runtimeObj.GetObjectKind().GroupVersionKind()

	// If not already present, add new owner reference without controller flag
	if !alreadyOwned {
		newOwnerRef := metav1.OwnerReference{
			APIVersion: gvk.GroupVersion().String(),
			Kind:       gvk.Kind,
			Name:       monitor.GetName(),
			UID:        monitor.GetUID(),
			// Set Controller to false to avoid conflict as multiple controllers are not allowed
			Controller:         pointer.BoolPtr(false),
			BlockOwnerDeletion: pointer.BoolPtr(true),
		}
		owners = append(owners, newOwnerRef)
	}

	// Sort owner references to enforce a consistent order by UID
	sort.SliceStable(owners, func(i, j int) bool {
		return owners[i].UID < owners[j].UID
	})

	// Update the owner references of the object
	obj.SetOwnerReferences(owners)
}

// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
	ctx context.Context,
	monitor *cozyv1alpha1.WorkloadMonitor,
	pod corev1.Pod,
) error {
	logger := log.FromContext(ctx)

	// Combine both init containers and normal containers to sum resources properly
	combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)

	// totalResources will store the sum of all container resource limits
	totalResources := make(map[string]resource.Quantity)

	// Iterate over all containers to aggregate their Limits
	for _, container := range combinedContainers {
		for name, qty := range container.Resources.Limits {
			if existing, exists := totalResources[name.String()]; exists {
				existing.Add(qty)
				totalResources[name.String()] = existing
			} else {
				totalResources[name.String()] = qty.DeepCopy()
			}
		}
	}

	// If annotation "workload.cozystack.io/resources" is present, parse and merge
	if resourcesStr, ok := pod.Annotations["workload.cozystack.io/resources"]; ok {
		annRes := map[string]string{}
		if err := json.Unmarshal([]byte(resourcesStr), &annRes); err != nil {
			logger.Error(err, "Failed to parse resources annotation", "pod", pod.Name)
		} else {
			for k, v := range annRes {
				parsed, err := resource.ParseQuantity(v)
				if err != nil {
					logger.Error(err, "Failed to parse resource quantity from annotation", "key", k, "value", v)
					continue
				}
				totalResources[k] = parsed
			}
		}
	}

	workload := &cozyv1alpha1.Workload{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.Name,
			Namespace: pod.Namespace,
		},
	}

	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
		// Update owner references with the new monitor
		updateOwnerReferences(workload.GetObjectMeta(), monitor)

		// Copy labels from the Pod if needed
		workload.Labels = pod.Labels

		// Fill Workload status fields:
		workload.Status.Kind = monitor.Spec.Kind
		workload.Status.Type = monitor.Spec.Type
		workload.Status.Resources = totalResources
		workload.Status.Operational = r.isPodReady(&pod)

		return nil
	})
	if err != nil {
		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
		return err
	}

	return nil
}

// Reconcile is the main reconcile loop.
// 1. It reconciles WorkloadMonitor objects themselves (create/update/delete).
// 2. It also reconciles Pod events mapped to WorkloadMonitor via label selector.
func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	// Fetch the WorkloadMonitor object if it exists
	monitor := &cozyv1alpha1.WorkloadMonitor{}
	err := r.Get(ctx, req.NamespacedName, monitor)
	if err != nil {
		// If the resource is not found, it may be a Pod event (mapFunc).
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Unable to fetch WorkloadMonitor")
		return ctrl.Result{}, err
	}

	// List Pods that match the WorkloadMonitor's selector
	podList := &corev1.PodList{}
	if err := r.List(
		ctx,
		podList,
		client.InNamespace(monitor.Namespace),
		client.MatchingLabels(monitor.Spec.Selector),
	); err != nil {
		logger.Error(err, "Unable to list Pods for WorkloadMonitor", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	var observedReplicas, availableReplicas int32

	// For each matching Pod, reconcile the corresponding Workload
	for _, pod := range podList.Items {
		observedReplicas++
		if err := r.reconcilePodForMonitor(ctx, monitor, pod); err != nil {
			logger.Error(err, "Failed to reconcile Workload for Pod", "pod", pod.Name)
			continue
		}
		if r.isPodReady(&pod) {
			availableReplicas++
		}
	}

	// Update WorkloadMonitor status based on observed pods
	monitor.Status.ObservedReplicas = observedReplicas
	monitor.Status.AvailableReplicas = availableReplicas

	// Default to operational = true, but check MinReplicas if set
	monitor.Status.Operational = pointer.Bool(true)
	if monitor.Spec.MinReplicas != nil && availableReplicas < *monitor.Spec.MinReplicas {
		monitor.Status.Operational = pointer.Bool(false)
	}

	// Update the WorkloadMonitor status in the cluster
	if err := r.Status().Update(ctx, monitor); err != nil {
		logger.Error(err, "Unable to update WorkloadMonitor status", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	// Return without requeue if we want purely event-driven reconciliations
	return ctrl.Result{}, nil
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Watch WorkloadMonitor objects
		For(&cozyv1alpha1.WorkloadMonitor{}).
		// Also watch Pod objects and map them back to WorkloadMonitor if labels match
		Watches(
			&corev1.Pod{},
			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
				pod, ok := obj.(*corev1.Pod)
				if !ok {
					return nil
				}

				var monitorList cozyv1alpha1.WorkloadMonitorList
				// List all WorkloadMonitors in the same namespace
				if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
					return nil
				}

				// Match each monitor's selector with the Pod's labels
				var requests []reconcile.Request
				for _, m := range monitorList.Items {
					matches := true
					for k, v := range m.Spec.Selector {
						if podVal, exists := pod.Labels[k]; !exists || podVal != v {
							matches = false
							break
						}
					}
					if matches {
						requests = append(requests, reconcile.Request{
							NamespacedName: types.NamespacedName{
								Namespace: m.Namespace,
								Name:      m.Name,
							},
						})
					}
				}
				return requests
			}),
		).
		// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
		Owns(&cozyv1alpha1.Workload{}).
		Complete(r)
}

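The reconciler exposes SetupWithManager, but the cmd/ entrypoint that calls it is not part of this diff. A minimal sketch of how it could be wired into a controller-runtime manager follows; the internal/controller import path and the scheme setup are assumptions, not taken from this changeset.

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	"github.com/cozystack/cozystack/internal/controller" // assumed package path
)

func main() {
	// Register the built-in types (Pods, Services) and the Cozystack API types.
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = cozyv1alpha1.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	// Attach the WorkloadMonitor reconciler and its Pod/Workload watches.
	if err := (&controller.WorkloadMonitorReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}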
internal/telemetry/collector.go | 292 (new file)
@@ -0,0 +1,292 @@
package telemetry

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// Collector handles telemetry data collection and sending
type Collector struct {
	client          client.Client
	discoveryClient discovery.DiscoveryInterface
	config          *Config
	ticker          *time.Ticker
	stopCh          chan struct{}
}

// NewCollector creates a new telemetry collector
func NewCollector(client client.Client, config *Config, kubeConfig *rest.Config) (*Collector, error) {
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create discovery client: %w", err)
	}
	return &Collector{
		client:          client,
		discoveryClient: discoveryClient,
		config:          config,
	}, nil
}

// Start implements manager.Runnable
func (c *Collector) Start(ctx context.Context) error {
	if c.config.Disabled {
		return nil
	}

	c.ticker = time.NewTicker(c.config.Interval)
	c.stopCh = make(chan struct{})

	// Initial collection
	c.collect(ctx)

	for {
		select {
		case <-ctx.Done():
			c.ticker.Stop()
			close(c.stopCh)
			return nil
		case <-c.ticker.C:
			c.collect(ctx)
		}
	}
}

// NeedLeaderElection implements manager.LeaderElectionRunnable
func (c *Collector) NeedLeaderElection() bool {
	// Only run telemetry collector on the leader
	return true
}

// Stop halts telemetry collection
func (c *Collector) Stop() {
	close(c.stopCh)
}

// getSizeGroup returns the exponential size group for PVC
func getSizeGroup(size resource.Quantity) string {
	gb := size.Value() / (1024 * 1024 * 1024)
	switch {
	case gb <= 1:
		return "1Gi"
	case gb <= 5:
		return "5Gi"
	case gb <= 10:
		return "10Gi"
	case gb <= 25:
		return "25Gi"
	case gb <= 50:
		return "50Gi"
	case gb <= 100:
		return "100Gi"
	case gb <= 250:
		return "250Gi"
	case gb <= 500:
		return "500Gi"
	case gb <= 1024:
		return "1Ti"
	case gb <= 2048:
		return "2Ti"
	case gb <= 5120:
		return "5Ti"
	default:
		return "10Ti"
	}
}

// collect gathers and sends telemetry data
func (c *Collector) collect(ctx context.Context) {
	logger := log.FromContext(ctx).V(1)

	// Get cluster ID from kube-system namespace
	var kubeSystemNS corev1.Namespace
	if err := c.client.Get(ctx, types.NamespacedName{Name: "kube-system"}, &kubeSystemNS); err != nil {
		logger.Info(fmt.Sprintf("Failed to get kube-system namespace: %v", err))
		return
	}

	clusterID := string(kubeSystemNS.UID)

	var cozystackCM corev1.ConfigMap
	if err := c.client.Get(ctx, types.NamespacedName{Namespace: "cozy-system", Name: "cozystack"}, &cozystackCM); err != nil {
		logger.Info(fmt.Sprintf("Failed to get cozystack configmap in cozy-system namespace: %v", err))
		return
	}

	oidcEnabled := cozystackCM.Data["oidc-enabled"]
	bundle := cozystackCM.Data["bundle-name"]
	bundleEnable := cozystackCM.Data["bundle-enable"]
	bundleDisable := cozystackCM.Data["bundle-disable"]

	// Get Kubernetes version from nodes
	var nodeList corev1.NodeList
	if err := c.client.List(ctx, &nodeList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list nodes: %v", err))
		return
	}

	// Create metrics buffer
	var metrics strings.Builder

	// Add Cozystack info metric
	if len(nodeList.Items) > 0 {
		k8sVersion, _ := c.discoveryClient.ServerVersion()
		metrics.WriteString(fmt.Sprintf(
			"cozy_cluster_info{cozystack_version=\"%s\",kubernetes_version=\"%s\",oidc_enabled=\"%s\",bundle_name=\"%s\",bunde_enable=\"%s\",bunde_disable=\"%s\"} 1\n",
			c.config.CozystackVersion,
			k8sVersion,
			oidcEnabled,
			bundle,
			bundleEnable,
			bundleDisable,
		))
	}

	// Collect node metrics
	nodeOSCount := make(map[string]int)
	for _, node := range nodeList.Items {
		key := fmt.Sprintf("%s (%s)", node.Status.NodeInfo.OperatingSystem, node.Status.NodeInfo.OSImage)
		nodeOSCount[key] = nodeOSCount[key] + 1
	}

	for osKey, count := range nodeOSCount {
		metrics.WriteString(fmt.Sprintf(
			"cozy_nodes_count{os=\"%s\",kernel=\"%s\"} %d\n",
			osKey,
			nodeList.Items[0].Status.NodeInfo.KernelVersion,
			count,
		))
	}

	// Collect LoadBalancer services metrics
	var serviceList corev1.ServiceList
	if err := c.client.List(ctx, &serviceList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list Services: %v", err))
	} else {
		lbCount := 0
		for _, svc := range serviceList.Items {
			if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
				lbCount++
			}
		}
		metrics.WriteString(fmt.Sprintf("cozy_loadbalancers_count %d\n", lbCount))
	}

	// Count tenant namespaces
	var nsList corev1.NamespaceList
	if err := c.client.List(ctx, &nsList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list Namespaces: %v", err))
	} else {
		tenantCount := 0
		for _, ns := range nsList.Items {
			if strings.HasPrefix(ns.Name, "tenant-") {
				tenantCount++
			}
		}
		metrics.WriteString(fmt.Sprintf("cozy_tenants_count %d\n", tenantCount))
	}

	// Collect PV metrics grouped by driver and size
	var pvList corev1.PersistentVolumeList
	if err := c.client.List(ctx, &pvList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list PVs: %v", err))
	} else {
		// Map to store counts by size and driver
		pvMetrics := make(map[string]map[string]int)

		for _, pv := range pvList.Items {
			if capacity, ok := pv.Spec.Capacity[corev1.ResourceStorage]; ok {
				sizeGroup := getSizeGroup(capacity)

				// Get the CSI driver name
				driver := "unknown"
				if pv.Spec.CSI != nil {
					driver = pv.Spec.CSI.Driver
				} else if pv.Spec.HostPath != nil {
					driver = "hostpath"
				} else if pv.Spec.NFS != nil {
					driver = "nfs"
				}

				// Initialize nested map if needed
				if _, exists := pvMetrics[sizeGroup]; !exists {
					pvMetrics[sizeGroup] = make(map[string]int)
				}

				// Increment count for this size/driver combination
				pvMetrics[sizeGroup][driver]++
			}
		}

		// Write metrics
		for size, drivers := range pvMetrics {
			for driver, count := range drivers {
				metrics.WriteString(fmt.Sprintf(
					"cozy_pvs_count{driver=\"%s\",size=\"%s\"} %d\n",
					driver,
					size,
					count,
				))
			}
		}
	}

	// Collect workload metrics
	var monitorList cozyv1alpha1.WorkloadMonitorList
	if err := c.client.List(ctx, &monitorList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list WorkloadMonitors: %v", err))
		return
	}

	for _, monitor := range monitorList.Items {
		metrics.WriteString(fmt.Sprintf(
			"cozy_workloads_count{uid=\"%s\",kind=\"%s\",type=\"%s\",version=\"%s\"} %d\n",
			monitor.UID,
			monitor.Spec.Kind,
			monitor.Spec.Type,
			monitor.Spec.Version,
			monitor.Status.ObservedReplicas,
		))
	}

	// Send metrics
	if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
		logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
	}
}

// sendMetrics sends collected metrics to the configured endpoint
func (c *Collector) sendMetrics(clusterID, metrics string) error {
	req, err := http.NewRequest("POST", c.config.Endpoint, bytes.NewBufferString(metrics))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Content-Type", "text/plain")
	req.Header.Set("X-Cluster-ID", clusterID)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	return nil
}

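Per its comments, the collector implements manager.Runnable and manager.LeaderElectionRunnable, so it is presumably handed to the controller-runtime manager via mgr.Add. A hedged sketch of that wiring follows; the helper name, import path, and version string are illustrative, not taken from this changeset.

import (
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/cozystack/cozystack/internal/telemetry" // assumed package path
)

// registerTelemetry is a hypothetical helper showing how the collector
// might be attached to a controller-runtime manager.
func registerTelemetry(mgr ctrl.Manager) error {
	cfg := telemetry.DefaultConfig()
	cfg.CozystackVersion = "v0.21.0" // placeholder; real wiring is not in this diff

	collector, err := telemetry.NewCollector(mgr.GetClient(), cfg, mgr.GetConfig())
	if err != nil {
		return err
	}
	// Because NeedLeaderElection returns true, the manager runs the
	// collector only on the elected leader.
	return mgr.Add(collector)
}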
internal/telemetry/config.go | 27 (new file)
@@ -0,0 +1,27 @@
package telemetry

import (
	"time"
)

// Config holds telemetry configuration
type Config struct {
	// Disable telemetry collection if set to true
	Disabled bool
	// Endpoint to send telemetry data to
	Endpoint string
	// Interval between telemetry data collection
	Interval time.Duration
	// CozystackVersion represents the current version of Cozystack
	CozystackVersion string
}

// DefaultConfig returns default telemetry configuration
func DefaultConfig() *Config {
	return &Config{
		Disabled:         false,
		Endpoint:         "https://telemetry.cozystack.io",
		Interval:         15 * time.Minute,
		CozystackVersion: "unknown",
	}
}

@@ -1,105 +0,0 @@
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: cozy-system
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cozystack
  namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cozystack
subjects:
- kind: ServiceAccount
  name: cozystack
  namespace: cozy-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Service
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  ports:
  - name: http
    port: 80
    targetPort: 8123
  selector:
    app: cozystack
  type: ClusterIP
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cozystack
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  template:
    metadata:
      labels:
        app: cozystack
    spec:
      hostNetwork: true
      serviceAccountName: cozystack
      containers:
      - name: cozystack
        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.20.2"
        env:
        - name: KUBERNETES_SERVICE_HOST
          value: localhost
        - name: KUBERNETES_SERVICE_PORT
          value: "7445"
        - name: K8S_AWAIT_ELECTION_ENABLED
          value: "1"
        - name: K8S_AWAIT_ELECTION_NAME
          value: cozystack
        - name: K8S_AWAIT_ELECTION_LOCK_NAME
          value: cozystack
        - name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE
          value: cozy-system
        - name: K8S_AWAIT_ELECTION_IDENTITY
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
      - name: darkhttpd
        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.20.2"
        command:
        - /usr/bin/darkhttpd
        - /cozystack/assets
        - --port
        - "8123"
        ports:
        - name: http
          containerPort: 8123
      tolerations:
      - key: "node.kubernetes.io/not-ready"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node.cilium.io/agent-not-ready"
        operator: "Exists"
        effect: "NoSchedule"

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.1
+version: 0.7.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -14,6 +14,7 @@ image:
 --cache-to type=inline \
 --metadata-file images/clickhouse-backup.json \
 --push=$(PUSH) \
+--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 --load=$(LOAD)
 echo "$(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG))@$$(yq e '."containerimage.digest"' images/clickhouse-backup.json -o json -r)" \
 > images/clickhouse-backup.tag

@@ -36,13 +36,15 @@ more details:

 ### Backup parameters

 | Name | Description | Value |
-| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
+| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
 | `backup.enabled` | Enable pereiodic backups | `false` |
 | `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
 | `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
 | `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
 | `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
 | `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
 | `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
 | `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
+| `resources` | Resources | `{}` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:dda84420cb8648721299221268a00d72a05c7af5b7fb452619bac727068b9e61
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.6.2@sha256:67dd53efa86b704fc5cb876aca055fef294b31ab67899b683a4821ea12582ea7
packages/apps/clickhouse/templates/_resources.tpl (new file, 50 lines)
@@ -0,0 +1,50 @@
+{{/*
+Copyright Broadcom, Inc. All Rights Reserved.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a resource request/limit object based on a given preset.
+These presets are for basic testing and not meant to be used in production
+{{ include "resources.preset" (dict "type" "nano") -}}
+*/}}
+{{- define "resources.preset" -}}
+{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
+{{- $presets := dict
+  "nano" (dict
+      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
+   )
+  "micro" (dict
+      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
+   )
+  "small" (dict
+      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
+   )
+  "medium" (dict
+      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
+   )
+  "large" (dict
+      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
+   )
+  "xlarge" (dict
+      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
+   )
+  "2xlarge" (dict
+      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
+   )
+ }}
+{{- if hasKey $presets .type -}}
+{{- index $presets .type | toYaml -}}
+{{- else -}}
+{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
+{{- end -}}
+{{- end -}}
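A quick sketch of what this helper emits, derived from the dictionary above: for example, `include "resources.preset" (dict "type" "small")` renders to the following (toYaml sorts keys alphabetically):

limits:
  cpu: 750m               # requests + 50%
  ephemeral-storage: 2Gi
  memory: 768Mi           # requests + 50%
requests:
  cpu: 500m
  ephemeral-storage: 50Mi
  memory: 512Mi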
@@ -121,6 +121,11 @@ spec:
       containers:
         - name: clickhouse
           image: clickhouse/clickhouse-server:24.9.2.42
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 16 }}
+          {{- else if ne .Values.resourcesPreset "none" }}
+          resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
+          {{- end }}
           volumeMounts:
             - name: data-volume-template
               mountPath: /var/lib/clickhouse
@@ -17,3 +17,10 @@ rules:
     resourceNames:
       - {{ .Release.Name }}-credentials
     verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - cozystack.io
+    resources:
+      - workloadmonitors
+    resourceNames:
+      - {{ .Release.Name }}
+    verbs: ["get", "list", "watch"]
packages/apps/clickhouse/templates/workloadmonitor.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
+---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}
+spec:
+  replicas: {{ .Values.replicas }}
+  minReplicas: 1
+  kind: clickhouse
+  type: clickhouse
+  selector:
+    clickhouse.altinity.com/chi: {{ $.Release.Name }}
+  version: {{ $.Chart.Version }}
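For illustration, a hypothetical release named mydb with replicas set to 2 would render this template roughly as follows (the release name is illustrative; the version comes from the Chart.yaml bump above):

apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: mydb
spec:
  replicas: 2
  minReplicas: 1
  kind: clickhouse
  type: clickhouse
  selector:
    clickhouse.altinity.com/chi: mydb   # matches the pods of the Altinity ClickHouseInstallation
  version: 0.7.0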
@@ -76,6 +76,16 @@
         "default": "ChaXoveekoh6eigh4siesheeda2quai0"
       }
     }
+    },
+    "resources": {
+      "type": "object",
+      "description": "Resources",
+      "default": {}
+    },
+    "resourcesPreset": {
+      "type": "string",
+      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+      "default": "nano"
     }
   }
 }
@@ -46,3 +46,16 @@ backup:
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+
+## @param resources Resources
+resources: {}
+# resources:
+#   limits:
+#     cpu: 4000m
+#     memory: 4Gi
+#   requests:
+#     cpu: 100m
+#     memory: 512Mi
+
+## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+resourcesPreset: "nano"
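Setting resources explicitly takes precedence: the template's `if .Values.resources` branch wins and resourcesPreset is ignored. An illustrative user override, following the commented example above:

resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 100m
    memory: 512Mi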
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.1
+version: 0.5.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -21,15 +21,17 @@

 ### Backup parameters

 | Name                     | Description                                    | Value                                                  |
-| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
+| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
 | `backup.enabled`         | Enable periodic backups                        | `false`                                                |
 | `backup.s3Region`        | The AWS S3 region where backups are stored     | `us-east-1`                                            |
 | `backup.s3Bucket`        | The S3 bucket used for storing backups         | `s3.example.org/postgres-backups`                      |
 | `backup.schedule`        | Cron schedule for automated backups            | `0 2 * * *`                                            |
 | `backup.cleanupStrategy` | The strategy for cleaning up old backups       | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
 | `backup.s3AccessKey`     | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
 | `backup.s3SecretKey`     | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
 | `backup.resticPassword`  | The password for Restic backup encryption      | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
+| `resources`              | Resources                                      | `{}`                                                   |
+| `resourcesPreset`        | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |


@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/postgres-backup:0.7.1@sha256:406d2c5a30fa8b6fe10eab3cba45c06fea3876e81fd123ead6dc3c19347762d0
+ghcr.io/cozystack/cozystack/postgres-backup:0.9.0@sha256:2b6ba87f5688a439bd2ac12835a5ab9e601feb15c0c44ed0d9ca48cec7c52521
packages/apps/ferretdb/templates/_resources.tpl (new file, 50 lines; content identical to packages/apps/clickhouse/templates/_resources.tpl above)
@@ -17,3 +17,10 @@ rules:
     resourceNames:
       - {{ .Release.Name }}-credentials
     verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - cozystack.io
+    resources:
+      - workloadmonitors
+    resourceNames:
+      - {{ .Release.Name }}
+    verbs: ["get", "list", "watch"]
@@ -6,10 +6,20 @@ metadata:
 spec:
   instances: {{ .Values.replicas }}
   enableSuperuserAccess: true
+  {{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
+  {{- if $configMap }}
+  {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
+  {{- if $rawConstraints }}
+  {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
+  {{- end }}
+  {{- end }}
   minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
   maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
+  {{- if .Values.resources }}
+  resources: {{- toYaml .Values.resources | nindent 4 }}
+  {{- else if ne .Values.resourcesPreset "none" }}
+  resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
+  {{- end }}
   monitoring:
     enablePodMonitor: true

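The lookup call reads cluster-wide scheduling defaults from the cozystack-scheduling ConfigMap at render time and splices them directly under spec. A hypothetical shape of that ConfigMap, assuming the globalAppTopologySpreadConstraints value is a YAML fragment merged under spec (the constraint values below are illustrative, not taken from this change):

apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack-scheduling
  namespace: cozy-system
data:
  globalAppTopologySpreadConstraints: |
    topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: ScheduleAnyway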
packages/apps/ferretdb/templates/workloadmonitor.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
+---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}
+spec:
+  replicas: {{ .Values.replicas }}
+  minReplicas: 1
+  kind: ferretdb
+  type: ferretdb
+  selector:
+    app: {{ $.Release.Name }}
+  version: {{ $.Chart.Version }}
@@ -81,6 +81,16 @@
         "default": "ChaXoveekoh6eigh4siesheeda2quai0"
       }
     }
+    },
+    "resources": {
+      "type": "object",
+      "description": "Resources",
+      "default": {}
+    },
+    "resourcesPreset": {
+      "type": "string",
+      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+      "default": "nano"
     }
   }
 }
@@ -48,3 +48,16 @@ backup:
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+
+## @param resources Resources
+resources: {}
+# resources:
+#   limits:
+#     cpu: 4000m
+#     memory: 4Gi
+#   requests:
+#     cpu: 100m
+#     memory: 512Mi
+
+## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.1
+version: 0.4.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -13,6 +13,7 @@ image-nginx:
 		--cache-to type=inline \
 		--metadata-file images/nginx-cache.json \
 		--push=$(PUSH) \
+		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
 	echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))@$$(yq e '."containerimage.digest"' images/nginx-cache.json -o json -r)" \
 		> images/nginx-cache.tag
@@ -60,13 +60,17 @@ VTS module shows wrong upstream response time

 ### Common parameters

 | Name               | Description                                      | Value   |
-| ------------------ | ------------------------------------------------ | ------- |
+| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
 | `external`         | Enable external access from outside the cluster  | `false` |
 | `size`             | Persistent Volume size                           | `10Gi`  |
 | `storageClass`     | StorageClass used to store the data              | `""`    |
 | `haproxy.replicas` | Number of HAProxy replicas                       | `2`     |
 | `nginx.replicas`   | Number of Nginx replicas                         | `2`     |
+| `haproxy.resources`       | Resources | `{}`   |
+| `haproxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
+| `nginx.resources`         | Resources | `{}`   |
+| `nginx.resourcesPreset`   | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

 ### Configuration parameters

@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:27112d470a31725b75b29b29919af06b4ce1339e3b502b08889a92ab7099adde
+ghcr.io/cozystack/cozystack/nginx-cache:0.3.1@sha256:2b82eae28239ca0f9968602c69bbb752cd2a5818e64934ccd06cb91d95d019c7
packages/apps/http-cache/templates/_resources.tpl (new file, 50 lines; content identical to packages/apps/clickhouse/templates/_resources.tpl above)
@@ -33,6 +33,11 @@ spec:
       containers:
       - image: haproxy:latest
         name: haproxy
+        {{- if .Values.haproxy.resources }}
+        resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
+        {{- else if ne .Values.haproxy.resourcesPreset "none" }}
+        resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
+        {{- end }}
         ports:
         - containerPort: 8080
           name: http
@@ -52,6 +52,11 @@ spec:
       shareProcessNamespace: true
       containers:
      - name: nginx
+        {{- if $.Values.nginx.resources }}
+        resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
+        {{- else if ne $.Values.nginx.resourcesPreset "none" }}
+        resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
+        {{- end }}
        image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
        readinessProbe:
          httpGet:
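The $.Files.Get call inlines the digest-pinned reference generated by the Makefile target above, so with the images/nginx-cache.tag content from this change the container field renders as:

image: "ghcr.io/cozystack/cozystack/nginx-cache:0.3.1@sha256:2b82eae28239ca0f9968602c69bbb752cd2a5818e64934ccd06cb91d95d019c7"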
@@ -83,6 +88,13 @@ spec:
      - name: reloader
        image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
        command: ["/usr/bin/nginx-reloader.sh"]
+        resources:
+          limits:
+            cpu: 50m
+            memory: 50Mi
+          requests:
+            cpu: 50m
+            memory: 50Mi
        #command: ["sleep", "infinity"]
        volumeMounts:
        - mountPath: /etc/nginx/nginx.conf
@@ -24,6 +24,16 @@
         "type": "number",
         "description": "Number of HAProxy replicas",
         "default": 2
+      },
+      "resources": {
+        "type": "object",
+        "description": "Resources",
+        "default": {}
+      },
+      "resourcesPreset": {
+        "type": "string",
+        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+        "default": "nano"
       }
     }
   },
@@ -34,6 +44,16 @@
         "type": "number",
         "description": "Number of Nginx replicas",
         "default": 2
+      },
+      "resources": {
+        "type": "object",
+        "description": "Resources",
+        "default": {}
+      },
+      "resourcesPreset": {
+        "type": "string",
+        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+        "default": "nano"
       }
     }
   },
@@ -12,8 +12,32 @@ size: 10Gi
 storageClass: ""
 haproxy:
   replicas: 2
+  ## @param haproxy.resources Resources
+  resources: {}
+  # resources:
+  #   limits:
+  #     cpu: 4000m
+  #     memory: 4Gi
+  #   requests:
+  #     cpu: 100m
+  #     memory: 512Mi
+
+  ## @param haproxy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+  resourcesPreset: "nano"
 nginx:
   replicas: 2
+  ## @param nginx.resources Resources
+  resources: {}
+  # resources:
+  #   limits:
+  #     cpu: 4000m
+  #     memory: 4Gi
+  #   requests:
+  #     cpu: 100m
+  #     memory: 512Mi
+
+  ## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+  resourcesPreset: "nano"

 ## @section Configuration parameters

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.1
+version: 0.5.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -4,15 +4,19 @@

 ### Common parameters

 | Name                     | Description                                      | Value   |
-| ------------------------ | ------------------------------------------------ | ------- |
+| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
 | `external`               | Enable external access from outside the cluster  | `false` |
 | `kafka.size`             | Persistent Volume size for Kafka                 | `10Gi`  |
 | `kafka.replicas`         | Number of Kafka replicas                         | `3`     |
 | `kafka.storageClass`     | StorageClass used to store the Kafka data        | `""`    |
 | `zookeeper.size`         | Persistent Volume size for ZooKeeper             | `5Gi`   |
 | `zookeeper.replicas`     | Number of ZooKeeper replicas                     | `3`     |
 | `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data    | `""`    |
+| `kafka.resources`           | Resources | `{}`   |
+| `kafka.resourcesPreset`     | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
+| `zookeeper.resources`       | Resources | `{}`   |
+| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

 ### Configuration parameters

packages/apps/kafka/templates/_resources.tpl (new file, 50 lines; content identical to packages/apps/clickhouse/templates/_resources.tpl above)
@@ -17,3 +17,11 @@ rules:
     resourceNames:
       - {{ .Release.Name }}-clients-ca
     verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - cozystack.io
+    resources:
+      - workloadmonitors
+    resourceNames:
+      - {{ .Release.Name }}
+      - {{ $.Release.Name }}-zookeeper
+    verbs: ["get", "list", "watch"]
@@ -8,6 +8,11 @@ metadata:
 spec:
   kafka:
     replicas: {{ .Values.kafka.replicas }}
+    {{- if .Values.kafka.resources }}
+    resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
+    {{- else if ne .Values.kafka.resourcesPreset "none" }}
+    resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
+    {{- end }}
     listeners:
       - name: plain
         port: 9092
@@ -57,8 +62,19 @@ spec:
         class: {{ . }}
       {{- end }}
       deleteClaim: true
+    metricsConfig:
+      type: jmxPrometheusExporter
+      valueFrom:
+        configMapKeyRef:
+          name: {{ .Release.Name }}-metrics
+          key: kafka-metrics-config.yml
   zookeeper:
     replicas: {{ .Values.zookeeper.replicas }}
+    {{- if .Values.zookeeper.resources }}
+    resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
+    {{- else if ne .Values.zookeeper.resourcesPreset "none" }}
+    resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
+    {{- end }}
     storage:
       type: persistent-claim
       {{- with .Values.zookeeper.size }}
@@ -68,6 +84,12 @@ spec:
       class: {{ . }}
       {{- end }}
       deleteClaim: false
+    metricsConfig:
+      type: jmxPrometheusExporter
+      valueFrom:
+        configMapKeyRef:
+          name: {{ .Release.Name }}-metrics
+          key: kafka-metrics-config.yml
   entityOperator:
     topicOperator: {}
     userOperator: {}
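Both metricsConfig blocks point at the {{ .Release.Name }}-metrics ConfigMap introduced by the new template below; with a metrics config in place, Strimzi runs the JMX Prometheus Exporter agent inside the Kafka and ZooKeeper pods and exposes it on the container port named tcp-prometheus, which is the port scraped by the new VMPodScrape further down.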
packages/apps/kafka/templates/metrics-configmap.yaml (new file, 198 lines)
@@ -0,0 +1,198 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: {{ .Release.Name }}-metrics
+data:
+  kafka-metrics-config.yml: |
+    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
+    lowercaseOutputName: true
+    rules:
+    # Special cases and very specific rules
+    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
+      name: kafka_server_$1_$2
+      type: GAUGE
+      labels:
+        clientId: "$3"
+        topic: "$4"
+        partition: "$5"
+    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
+      name: kafka_server_$1_$2
+      type: GAUGE
+      labels:
+        clientId: "$3"
+        broker: "$4:$5"
+    - pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
+      name: kafka_server_$1_connections_tls_info
+      type: GAUGE
+      labels:
+        cipher: "$2"
+        protocol: "$3"
+        listener: "$4"
+        networkProcessor: "$5"
+    - pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
+      name: kafka_server_$1_connections_software
+      type: GAUGE
+      labels:
+        clientSoftwareName: "$2"
+        clientSoftwareVersion: "$3"
+        listener: "$4"
+        networkProcessor: "$5"
+    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
+      name: kafka_server_$1_$4
+      type: COUNTER
+      labels:
+        listener: "$2"
+        networkProcessor: "$3"
+    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
+      name: kafka_server_$1_$4
+      type: GAUGE
+      labels:
+        listener: "$2"
+        networkProcessor: "$3"
+    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
+      name: kafka_server_$1_$4
+      type: COUNTER
+      labels:
+        listener: "$2"
+        networkProcessor: "$3"
+    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
+      name: kafka_server_$1_$4
+      type: GAUGE
+      labels:
+        listener: "$2"
+        networkProcessor: "$3"
+    # Some percent metrics use MeanRate attribute
+    # Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
+      name: kafka_$1_$2_$3_percent
+      type: GAUGE
+    # Generic gauges for percents
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
+      name: kafka_$1_$2_$3_percent
+      type: GAUGE
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
+      name: kafka_$1_$2_$3_percent
+      type: GAUGE
+      labels:
+        "$4": "$5"
+    # Generic per-second counters with 0-2 key/value pairs
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
+      name: kafka_$1_$2_$3_total
+      type: COUNTER
+      labels:
+        "$4": "$5"
+        "$6": "$7"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
+      name: kafka_$1_$2_$3_total
+      type: COUNTER
+      labels:
+        "$4": "$5"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
+      name: kafka_$1_$2_$3_total
+      type: COUNTER
+    # Generic gauges with 0-2 key/value pairs
+    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
+      name: kafka_$1_$2_$3
+      type: GAUGE
+      labels:
+        "$4": "$5"
+        "$6": "$7"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
+      name: kafka_$1_$2_$3
+      type: GAUGE
+      labels:
+        "$4": "$5"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
+      name: kafka_$1_$2_$3
+      type: GAUGE
+    # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
+    # Note that these are missing the '_sum' metric!
+    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
+      name: kafka_$1_$2_$3_count
+      type: COUNTER
+      labels:
+        "$4": "$5"
+        "$6": "$7"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
+      name: kafka_$1_$2_$3
+      type: GAUGE
+      labels:
+        "$4": "$5"
+        "$6": "$7"
+        quantile: "0.$8"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
+      name: kafka_$1_$2_$3_count
+      type: COUNTER
+      labels:
+        "$4": "$5"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
+      name: kafka_$1_$2_$3
+      type: GAUGE
+      labels:
+        "$4": "$5"
+        quantile: "0.$6"
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
+      name: kafka_$1_$2_$3_count
+      type: COUNTER
+    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
+      name: kafka_$1_$2_$3
+      type: GAUGE
+      labels:
+        quantile: "0.$4"
+    # KRaft overall related metrics
+    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
+    - pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
+      name: kafka_server_raftmetrics_$1
+      type: COUNTER
+    - pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
+      name: kafka_server_raftmetrics_$1
+      value: 1
+      type: UNTYPED
+      labels:
+        $1: "$2"
+    - pattern: "kafka.server<type=raft-metrics><>(.+):"
+      name: kafka_server_raftmetrics_$1
+      type: GAUGE
+    # KRaft "low level" channels related metrics
+    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
+    - pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
+      name: kafka_server_raftchannelmetrics_$1
+      type: COUNTER
+    - pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
+      name: kafka_server_raftchannelmetrics_$1
+      type: GAUGE
+    # Broker metrics related to fetching metadata topic records in KRaft mode
+    - pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
+      name: kafka_server_brokermetadatametrics_$1
+      type: GAUGE
+  zookeeper-metrics-config.yml: |
+    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
+    lowercaseOutputName: true
+    rules:
+    # replicated Zookeeper
+    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
+      name: "zookeeper_$2"
+      type: GAUGE
+    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
+      name: "zookeeper_$3"
+      type: GAUGE
+      labels:
+        replicaId: "$2"
+    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)"
+      name: "zookeeper_$4"
+      type: COUNTER
+      labels:
+        replicaId: "$2"
+        memberType: "$3"
+    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
+      name: "zookeeper_$4"
+      type: GAUGE
+      labels:
+        replicaId: "$2"
+        memberType: "$3"
+    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
+      name: "zookeeper_$4_$5"
+      type: GAUGE
+      labels:
+        replicaId: "$2"
+        memberType: "$3"
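To make the rule set concrete, here is how one of the generic per-second counter rules rewrites a broker MBean into a Prometheus series (the topic name is illustrative, not taken from a live broker):

# MBean exposed by the broker (illustrative):
#   kafka.server<type=BrokerTopicMetrics, name=MessagesInPerSec, topic=orders><>Count
# Matching rule:
#   kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
# Resulting series (after lowercaseOutputName: true):
#   kafka_server_brokertopicmetrics_messagesin_total{topic="orders"}   # COUNTER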
packages/apps/kafka/templates/podscrape.yaml (new file, 40 lines)
@@ -0,0 +1,40 @@
+apiVersion: operator.victoriametrics.com/v1beta1
+kind: VMPodScrape
+metadata:
+  name: {{ .Release.Name }}
+spec:
+  podMetricsEndpoints:
+    - port: tcp-prometheus
+      scheme: http
+      relabelConfigs:
+        - separator: ;
+          regex: __meta_kubernetes_pod_label_(strimzi_io_.+)
+          replacement: $1
+          action: labelmap
+        - sourceLabels: [__meta_kubernetes_namespace]
+          separator: ;
+          regex: (.*)
+          targetLabel: namespace
+          replacement: $1
+          action: replace
+        - sourceLabels: [__meta_kubernetes_pod_name]
+          separator: ;
+          regex: (.*)
+          targetLabel: pod
+          replacement: $1
+          action: replace
+        - sourceLabels: [__meta_kubernetes_pod_node_name]
+          separator: ;
+          regex: (.*)
+          targetLabel: node
+          replacement: $1
+          action: replace
+        - sourceLabels: [__meta_kubernetes_pod_host_ip]
+          separator: ;
+          regex: (.*)
+          targetLabel: node_ip
+          replacement: $1
+          action: replace
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: {{ .Release.Name }}
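Net effect of the relabeling above, sketched for a hypothetical broker pod: the labelmap rule carries the pod's strimzi.io/* labels onto every series, and the replace rules attach namespace, pod, node, and node_ip (all names below are illustrative):

# kafka_server_replicamanager_leadercount{
#   strimzi_io_cluster="my-kafka",
#   namespace="tenant-a", pod="my-kafka-kafka-0",
#   node="worker-1", node_ip="10.0.0.5"} 12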
packages/apps/kafka/templates/workloadmonitor.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
+---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}
+spec:
+  replicas: {{ .Values.kafka.replicas }}
+  minReplicas: 1
+  kind: kafka
+  type: kafka
+  selector:
+    app.kubernetes.io/instance: {{ $.Release.Name }}
+    app.kubernetes.io/name: kafka
+  version: {{ $.Chart.Version }}
+
+---
+
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}-zookeeper
+spec:
+  replicas: {{ .Values.zookeeper.replicas }}
+  minReplicas: 1
+  kind: kafka
+  type: zookeeper
+  selector:
+    app.kubernetes.io/instance: {{ $.Release.Name }}
+    app.kubernetes.io/name: zookeeper
+  version: {{ $.Chart.Version }}
@@ -24,6 +24,16 @@
         "type": "string",
         "description": "StorageClass used to store the Kafka data",
         "default": ""
+      },
+      "resources": {
+        "type": "object",
+        "description": "Resources",
+        "default": {}
+      },
+      "resourcesPreset": {
+        "type": "string",
+        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+        "default": "nano"
       }
     }
   },
@@ -44,6 +54,16 @@
         "type": "string",
         "description": "StorageClass used to store the ZooKeeper data",
         "default": ""
+      },
+      "resources": {
+        "type": "object",
+        "description": "Resources",
+        "default": {}
+      },
+      "resourcesPreset": {
+        "type": "string",
+        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+        "default": "nano"
       }
     }
   },
@@ -14,10 +14,35 @@ kafka:
   size: 10Gi
   replicas: 3
   storageClass: ""
+  ## @param kafka.resources Resources
+  resources: {}
+  # resources:
+  #   limits:
+  #     cpu: 4000m
+  #     memory: 4Gi
+  #   requests:
+  #     cpu: 100m
+  #     memory: 512Mi
+
+  ## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+  resourcesPreset: "nano"
+
 zookeeper:
   size: 5Gi
   replicas: 3
   storageClass: ""
+  ## @param zookeeper.resources Resources
+  resources: {}
+  # resources:
+  #   limits:
+  #     cpu: 4000m
+  #     memory: 4Gi
+  #   requests:
+  #     cpu: 100m
+  #     memory: 512Mi
+
+  ## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+  resourcesPreset: "nano"

 ## @section Configuration parameters

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.14.1
+version: 0.17.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -18,6 +18,7 @@ image-ubuntu-container-disk:
 		--cache-to type=inline \
 		--metadata-file images/ubuntu-container-disk.json \
 		--push=$(PUSH) \
+		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
 	echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
 		> images/ubuntu-container-disk.tag
@@ -32,6 +33,7 @@ image-kubevirt-cloud-provider:
 		--cache-to type=inline \
 		--metadata-file images/kubevirt-cloud-provider.json \
 		--push=$(PUSH) \
+		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
 	echo "$(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-cloud-provider.json -o json -r)" \
 		> images/kubevirt-cloud-provider.tag
@@ -46,6 +48,7 @@ image-kubevirt-csi-driver:
 		--cache-to type=inline \
 		--metadata-file images/kubevirt-csi-driver.json \
 		--push=$(PUSH) \
+		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
 	echo "$(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-csi-driver.json -o json -r)" \
 		> images/kubevirt-csi-driver.tag
@@ -61,6 +64,7 @@ image-cluster-autoscaler:
 		--cache-to type=inline \
 		--metadata-file images/cluster-autoscaler.json \
 		--push=$(PUSH) \
+		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
 	echo "$(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/cluster-autoscaler.json -o json -r)" \
 		> images/cluster-autoscaler.tag
@@ -1 +1 @@
|
|||||||
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.14.1@sha256:b63293bc295e8c04574900bb711ebfe51db6774beb6bc3a58791562ec11b406b
|
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.15.2@sha256:967e51702102d0dbd97f9847de4159d62681b31eb606322d2c29755393c2236e
|
||||||
@@ -1,12 +1,14 @@
 # Source: https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/cluster-autoscaler/Dockerfile.amd64
-ARG builder_image=docker.io/library/golang:1.22.5
+ARG builder_image=docker.io/library/golang:1.23.4
 ARG BASEIMAGE=gcr.io/distroless/static:nonroot-amd64
 FROM ${builder_image} AS builder
 RUN git clone https://github.com/kubernetes/autoscaler /src/autoscaler \
  && cd /src/autoscaler/cluster-autoscaler \
- && git checkout cluster-autoscaler-1.31.0
+ && git checkout cluster-autoscaler-1.32.0
 
 WORKDIR /src/autoscaler/cluster-autoscaler
+COPY fix-downscale.diff /fix-downscale.diff
+RUN git apply /fix-downscale.diff
 RUN make build
 
 FROM $BASEIMAGE
@@ -0,0 +1,13 @@
diff --git a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
index 4eec0e4bf..f28fd9241 100644
--- a/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
+++ b/cluster-autoscaler/cloudprovider/clusterapi/clusterapi_unstructured.go
@@ -106,8 +106,6 @@ func (r unstructuredScalableResource) Replicas() (int, error) {
 
 func (r unstructuredScalableResource) SetSize(nreplicas int) error {
 	switch {
-	case nreplicas > r.maxSize:
-		return fmt.Errorf("size increase too large - desired:%d max:%d", nreplicas, r.maxSize)
 	case nreplicas < r.minSize:
 		return fmt.Errorf("size decrease too large - desired:%d min:%d", nreplicas, r.minSize)
 	}
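fix-downscale.diff removes only the upper-bound case from SetSize, so the clusterapi provider no longer refuses to act on a node group whose desired replica count exceeds maxSize; the lower bound is still enforced. A minimal, self-contained sketch of the resulting behavior (the struct below is illustrative, not the upstream unstructuredScalableResource):

package main

import "fmt"

type scalableResource struct{ minSize, maxSize int }

// SetSize mirrors the patched switch: only the minimum is checked,
// so scale-downs from above maxSize are no longer rejected.
func (r scalableResource) SetSize(nreplicas int) error {
	if nreplicas < r.minSize {
		return fmt.Errorf("size decrease too large - desired:%d min:%d", nreplicas, r.minSize)
	}
	return nil
}

func main() {
	r := scalableResource{minSize: 1, maxSize: 3}
	fmt.Println(r.SetSize(5)) // <nil>: above max is now accepted
	fmt.Println(r.SetSize(0)) // error: below min is still rejected
}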
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.14.1@sha256:c0561a342e6b55d066f3363182f442e8fa30a0b6b448d89d15a1a855c999b98e
+ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:latest@sha256:47ad85a2bb2b11818df85e80cbc6e07021e97e429d5bb020ce8db002b37a77f1
@@ -3,12 +3,11 @@ FROM --platform=linux/amd64 golang:1.20.6 AS builder
 
 RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
  && cd /go/src/kubevirt.io/cloud-provider-kubevirt \
- && git checkout da9e0cf
+ && git checkout 443a1fe
 
 WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt
 
 # see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/335
-# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/336
 ADD patches /patches
 RUN git apply /patches/*.diff
 RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'
@@ -1,20 +0,0 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..95c31438 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -412,11 +412,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
 	// Create the desired port configuration
 	var desiredPorts []discovery.EndpointPort
 
-	for _, port := range service.Spec.Ports {
+	for i := range service.Spec.Ports {
 		desiredPorts = append(desiredPorts, discovery.EndpointPort{
-			Port:     &port.TargetPort.IntVal,
-			Protocol: &port.Protocol,
-			Name:     &port.Name,
+			Port:     &service.Spec.Ports[i].TargetPort.IntVal,
+			Protocol: &service.Spec.Ports[i].Protocol,
+			Name:     &service.Spec.Ports[i].Name,
 		})
 	}
 
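This standalone patch is deleted because its change (indexing service.Spec.Ports instead of taking the address of the range variable) is folded into the consolidated patch added below. The indexing form matters: under Go 1.21 and earlier, the range variable is a single reused variable, so &port.Name would leave every EndpointPort pointing at the last port. A small runnable sketch of the safe pattern:

package main

import "fmt"

func main() {
	ports := []int32{80, 443, 8080}

	// &ports[i] always addresses a distinct slice element, which is correct
	// under any Go version; taking &p of a range variable p aliased one
	// reused variable before Go 1.22 changed loop-variable semantics.
	var ptrs []*int32
	for i := range ports {
		ptrs = append(ptrs, &ports[i])
	}

	for _, p := range ptrs {
		fmt.Println(*p) // 80, 443, 8080
	}
}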
@@ -1,129 +0,0 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..6f6e3d32 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -108,32 +108,24 @@ func newRequest(reqType ReqType, obj interface{}, oldObj interface{}) *Request {
 }
 
 func (c *Controller) Init() error {
-
-	// Act on events from Services on the infra cluster. These are created by the EnsureLoadBalancer function.
-	// We need to watch for these events so that we can update the EndpointSlices in the infra cluster accordingly.
+	// Existing Service event handlers...
 	_, err := c.infraFactory.Core().V1().Services().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			// cast obj to Service
 			svc := obj.(*v1.Service)
-			// Only act on Services of type LoadBalancer
 			if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
 				klog.Infof("Service added: %v/%v", svc.Namespace, svc.Name)
 				c.queue.Add(newRequest(AddReq, obj, nil))
 			}
 		},
 		UpdateFunc: func(oldObj, newObj interface{}) {
-			// cast obj to Service
 			newSvc := newObj.(*v1.Service)
-			// Only act on Services of type LoadBalancer
 			if newSvc.Spec.Type == v1.ServiceTypeLoadBalancer {
 				klog.Infof("Service updated: %v/%v", newSvc.Namespace, newSvc.Name)
 				c.queue.Add(newRequest(UpdateReq, newObj, oldObj))
 			}
 		},
 		DeleteFunc: func(obj interface{}) {
-			// cast obj to Service
 			svc := obj.(*v1.Service)
-			// Only act on Services of type LoadBalancer
 			if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
 				klog.Infof("Service deleted: %v/%v", svc.Namespace, svc.Name)
 				c.queue.Add(newRequest(DeleteReq, obj, nil))
@@ -144,7 +136,7 @@ func (c *Controller) Init() error {
 		return err
 	}
 
-	// Monitor endpoint slices that we are interested in based on known services in the infra cluster
+	// Existing EndpointSlice event handlers in tenant cluster...
 	_, err = c.tenantFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
 			eps := obj.(*discovery.EndpointSlice)
@@ -194,10 +186,80 @@ func (c *Controller) Init() error {
 		return err
 	}
 
-	//TODO: Add informer for EndpointSlices in the infra cluster to watch for (unwanted) changes
+	// Add an informer for EndpointSlices in the infra cluster
+	_, err = c.infraFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			eps := obj.(*discovery.EndpointSlice)
+			if c.managedByController(eps) {
+				svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+				if svcErr != nil {
+					klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+					return
+				}
+				if svc != nil {
+					klog.Infof("Infra EndpointSlice added: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+					c.queue.Add(newRequest(AddReq, svc, nil))
+				}
+			}
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			eps := newObj.(*discovery.EndpointSlice)
+			if c.managedByController(eps) {
+				svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+				if svcErr != nil {
+					klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+					return
+				}
+				if svc != nil {
+					klog.Infof("Infra EndpointSlice updated: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+					c.queue.Add(newRequest(UpdateReq, svc, nil))
+				}
+			}
+		},
+		DeleteFunc: func(obj interface{}) {
+			eps := obj.(*discovery.EndpointSlice)
+			if c.managedByController(eps) {
+				svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+				if svcErr != nil {
+					klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s on delete: %v", eps.Namespace, eps.Name, svcErr)
+					return
+				}
+				if svc != nil {
+					klog.Infof("Infra EndpointSlice deleted: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+					c.queue.Add(newRequest(DeleteReq, svc, nil))
+				}
+			}
+		},
+	})
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
 
+// getInfraServiceForEPS returns the Service in the infra cluster associated with the given EndpointSlice.
+// It does this by reading the "kubernetes.io/service-name" label from the EndpointSlice, which should correspond
+// to the Service name. If not found or if the Service doesn't exist, it returns nil.
+func (c *Controller) getInfraServiceForEPS(ctx context.Context, eps *discovery.EndpointSlice) (*v1.Service, error) {
+	svcName := eps.Labels[discovery.LabelServiceName]
+	if svcName == "" {
+		// No service name label found, can't determine infra service.
+		return nil, nil
+	}
+
+	svc, err := c.infraClient.CoreV1().Services(c.infraNamespace).Get(ctx, svcName, metav1.GetOptions{})
+	if err != nil {
+		if k8serrors.IsNotFound(err) {
+			// Service doesn't exist
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return svc, nil
+}
+
 // Run starts an asynchronous loop that monitors and updates GKENetworkParamSet in the cluster.
 func (c *Controller) Run(numWorkers int, stopCh <-chan struct{}, controllerManagerMetrics *controllersmetrics.ControllerManagerMetrics) {
 	defer utilruntime.HandleCrash()
@@ -0,0 +1,689 @@
diff --git a/.golangci.yml b/.golangci.yml
index cf72a41a2..1c9237e83 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -122,3 +122,9 @@ linters:
 # - testpackage
 # - revive
 # - wsl
+issues:
+  exclude-rules:
+    - filename: "kubevirteps_controller_test.go"
+      linters:
+        - govet
+      text: "declaration of \"err\" shadows"
diff --git a/cmd/kubevirt-cloud-controller-manager/kubevirteps.go b/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
index 74166b5d9..4e744f8de 100644
--- a/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
+++ b/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
@@ -101,7 +101,18 @@ func startKubevirtCloudController(
 
 	klog.Infof("Setting up kubevirtEPSController")
 
-	kubevirtEPSController := kubevirteps.NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, kubevirtCloud.Namespace())
+	clusterName := ccmConfig.ComponentConfig.KubeCloudShared.ClusterName
+	if clusterName == "" {
+		klog.Fatalf("Required flag --cluster-name is missing")
+	}
+
+	kubevirtEPSController := kubevirteps.NewKubevirtEPSController(
+		tenantClient,
+		infraClient,
+		infraDynamic,
+		kubevirtCloud.Namespace(),
+		clusterName,
+	)
 
 	klog.Infof("Initializing kubevirtEPSController")
 
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 6f6e3d322..b56882c12 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -54,10 +54,10 @@ type Controller struct {
 	infraDynamic dynamic.Interface
 	infraFactory informers.SharedInformerFactory
 
-	infraNamespace string
-	queue          workqueue.RateLimitingInterface
-	maxRetries     int
-
+	infraNamespace       string
+	clusterName          string
+	queue                workqueue.RateLimitingInterface
+	maxRetries           int
 	maxEndPointsPerSlice int
 }
 
@@ -65,8 +65,9 @@ func NewKubevirtEPSController(
 	tenantClient kubernetes.Interface,
 	infraClient kubernetes.Interface,
 	infraDynamic dynamic.Interface,
-	infraNamespace string) *Controller {
-
+	infraNamespace string,
+	clusterName string,
+) *Controller {
 	tenantFactory := informers.NewSharedInformerFactory(tenantClient, 0)
 	infraFactory := informers.NewSharedInformerFactoryWithOptions(infraClient, 0, informers.WithNamespace(infraNamespace))
 	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
@@ -79,6 +80,7 @@ func NewKubevirtEPSController(
 		infraDynamic: infraDynamic,
 		infraFactory: infraFactory,
 		infraNamespace: infraNamespace,
+		clusterName: clusterName,
 		queue: queue,
 		maxRetries: 25,
 		maxEndPointsPerSlice: 100,
@@ -320,22 +322,30 @@ func (c *Controller) processNextItem(ctx context.Context) bool {
 
 // getInfraServiceFromTenantEPS returns the Service in the infra cluster that is associated with the given tenant endpoint slice.
 func (c *Controller) getInfraServiceFromTenantEPS(ctx context.Context, slice *discovery.EndpointSlice) (*v1.Service, error) {
-	infraServices, err := c.infraClient.CoreV1().Services(c.infraNamespace).List(ctx,
-		metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s,%s=%s", kubevirt.TenantServiceNameLabelKey, slice.Labels["kubernetes.io/service-name"],
-			kubevirt.TenantServiceNamespaceLabelKey, slice.Namespace)})
+	tenantServiceName := slice.Labels[discovery.LabelServiceName]
+	tenantServiceNamespace := slice.Namespace
+
+	labelSelector := fmt.Sprintf(
+		"%s=%s,%s=%s,%s=%s",
+		kubevirt.TenantServiceNameLabelKey, tenantServiceName,
+		kubevirt.TenantServiceNamespaceLabelKey, tenantServiceNamespace,
+		kubevirt.TenantClusterNameLabelKey, c.clusterName,
+	)
+
+	svcList, err := c.infraClient.CoreV1().Services(c.infraNamespace).List(ctx, metav1.ListOptions{
+		LabelSelector: labelSelector,
+	})
 	if err != nil {
-		klog.Errorf("Failed to get Service in Infra for EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+		klog.Errorf("Failed to get Service in Infra for EndpointSlice %s in namespace %s: %v", slice.Name, tenantServiceNamespace, err)
 		return nil, err
 	}
-	if len(infraServices.Items) > 1 {
-		// This should never be possible, only one service should exist for a given tenant endpoint slice
-		klog.Errorf("Multiple services found for tenant endpoint slice %s in namespace %s", slice.Name, slice.Namespace)
+	if len(svcList.Items) > 1 {
+		klog.Errorf("Multiple services found for tenant endpoint slice %s in namespace %s", slice.Name, tenantServiceNamespace)
 		return nil, errors.New("multiple services found for tenant endpoint slice")
 	}
-	if len(infraServices.Items) == 1 {
-		return &infraServices.Items[0], nil
+	if len(svcList.Items) == 1 {
+		return &svcList.Items[0], nil
 	}
-	// No service found, possible if service is deleted.
 	return nil, nil
 }
 
@@ -363,16 +373,27 @@ func (c *Controller) getTenantEPSFromInfraService(ctx context.Context, svc *v1.S
 // getInfraEPSFromInfraService returns the EndpointSlices in the infra cluster that are associated with the given infra service.
 func (c *Controller) getInfraEPSFromInfraService(ctx context.Context, svc *v1.Service) ([]*discovery.EndpointSlice, error) {
 	var infraEPSSlices []*discovery.EndpointSlice
-	klog.Infof("Searching for endpoints on infra cluster for service %s in namespace %s.", svc.Name, svc.Namespace)
-	result, err := c.infraClient.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx,
-		metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discovery.LabelServiceName, svc.Name)})
+
+	klog.Infof("Searching for EndpointSlices in infra cluster for service %s/%s", svc.Namespace, svc.Name)
+
+	labelSelector := fmt.Sprintf(
+		"%s=%s,%s=%s",
+		discovery.LabelServiceName, svc.Name,
+		kubevirt.TenantClusterNameLabelKey, c.clusterName,
+	)
+
+	result, err := c.infraClient.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx, metav1.ListOptions{
+		LabelSelector: labelSelector,
+	})
 	if err != nil {
 		klog.Errorf("Failed to get EndpointSlices for Service %s in namespace %s: %v", svc.Name, svc.Namespace, err)
 		return nil, err
 	}
+
 	for _, eps := range result.Items {
 		infraEPSSlices = append(infraEPSSlices, &eps)
 	}
+
 	return infraEPSSlices, nil
 }
 
@@ -382,74 +403,117 @@ func (c *Controller) reconcile(ctx context.Context, r *Request) error {
 		return errors.New("could not cast object to service")
 	}
 
+	// Skip services not managed by this controller (missing required labels)
 	if service.Labels[kubevirt.TenantServiceNameLabelKey] == "" ||
 		service.Labels[kubevirt.TenantServiceNamespaceLabelKey] == "" ||
 		service.Labels[kubevirt.TenantClusterNameLabelKey] == "" {
-		klog.Infof("This LoadBalancer Service: %s is not managed by the %s. Skipping.", service.Name, ControllerName)
+		klog.Infof("Service %s is not managed by this controller. Skipping.", service.Name)
+		return nil
+	}
+
+	// Skip services for other clusters
+	if service.Labels[kubevirt.TenantClusterNameLabelKey] != c.clusterName {
+		klog.Infof("Skipping Service %s: cluster label %q doesn't match our clusterName %q", service.Name, service.Labels[kubevirt.TenantClusterNameLabelKey], c.clusterName)
 		return nil
 	}
+
 	klog.Infof("Reconciling: %v", service.Name)
 
+	/*
+		1) Check if Service in the infra cluster is actually present.
+		   If it's not found, mark it as 'deleted' so that we don't create new slices.
+	*/
 	serviceDeleted := false
-	svc, err := c.infraFactory.Core().V1().Services().Lister().Services(c.infraNamespace).Get(service.Name)
+	infraSvc, err := c.infraFactory.Core().V1().Services().Lister().Services(c.infraNamespace).Get(service.Name)
 	if err != nil {
-		klog.Infof("Service %s in namespace %s is deleted.", service.Name, service.Namespace)
+		// The Service is not present in the infra lister => treat as deleted
+		klog.Infof("Service %s in namespace %s is deleted (or not found).", service.Name, service.Namespace)
 		serviceDeleted = true
 	} else {
-		service = svc
+		// Use the actual object from the lister, so we have the latest state
+		service = infraSvc
 	}
 
+	/*
+		2) Get all existing EndpointSlices in the infra cluster that belong to this LB Service.
+		   We'll decide which of them should be updated or deleted.
+	*/
 	infraExistingEpSlices, err := c.getInfraEPSFromInfraService(ctx, service)
 	if err != nil {
 		return err
 	}
 
-	// At this point we have the current state of the 3 main objects we are interested in:
-	// 1. The Service in the infra cluster, the one created by the KubevirtCloudController.
-	// 2. The EndpointSlices in the tenant cluster, created for the tenant cluster's Service.
-	// 3. The EndpointSlices in the infra cluster, managed by this controller.
-
 	slicesToDelete := []*discovery.EndpointSlice{}
 	slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice)
 
+	// For example, if the service is single-stack IPv4 => only AddressTypeIPv4
+	// or if dual-stack => IPv4 and IPv6, etc.
 	serviceSupportedAddressesTypes := getAddressTypesForService(service)
-	// If the services switched to a different address type, we need to delete the old ones, because it's immutable.
-	// If the services switched to a different externalTrafficPolicy, we need to delete the old ones.
+
+	/*
+		3) Determine which slices to delete, and which to pass on to the normal
+		   "reconcileByAddressType" logic.
+
+		   - If 'serviceDeleted' is true OR service.Spec.Selector != nil, we remove them.
+		   - Also, if the slice's address type is unsupported by the Service, we remove it.
+	*/
 	for _, eps := range infraExistingEpSlices {
-		if service.Spec.Selector != nil || serviceDeleted {
-			klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has a selector", eps.Name, eps.Namespace)
-			// to be sure we don't delete any slice that is not managed by us
+		// If service is deleted or has a non-nil selector => remove slices
+		if serviceDeleted || service.Spec.Selector != nil {
+			/*
+				Only remove if it is clearly labeled as managed by us:
+				we do not want to accidentally remove slices that are not
+				created by this controller.
+			*/
 			if c.managedByController(eps) {
+				klog.Infof("Added for deletion EndpointSlice %s in namespace %s because service is deleted or has a selector",
+					eps.Name, eps.Namespace)
 				slicesToDelete = append(slicesToDelete, eps)
 			}
 			continue
 		}
+
+		// If the Service does not support this slice's AddressType => remove
 		if !serviceSupportedAddressesTypes.Has(eps.AddressType) {
-			klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has an unsupported address type: %v", eps.Name, eps.Namespace, eps.AddressType)
+			klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has an unsupported address type: %v",
+				eps.Name, eps.Namespace, eps.AddressType)
 			slicesToDelete = append(slicesToDelete, eps)
 			continue
 		}
+
+		/*
+			Otherwise, this slice is potentially still valid for the given AddressType,
+			we'll send it to reconcileByAddressType for final merging and updates.
+		*/
 		slicesByAddressType[eps.AddressType] = append(slicesByAddressType[eps.AddressType], eps)
 	}
 
-	if !serviceDeleted {
-		// Get tenant's endpoint slices for this service
+	/*
+		4) If the Service was NOT deleted and has NO selector (i.e., it's a "no-selector" LB Service),
+		   we proceed to handle creation and updates. That means:
+		   - Gather Tenant's EndpointSlices
+		   - Reconcile them by each AddressType
+	*/
+	if !serviceDeleted && service.Spec.Selector == nil {
 		tenantEpSlices, err := c.getTenantEPSFromInfraService(ctx, service)
 		if err != nil {
 			return err
 		}
 
-		// Reconcile the EndpointSlices for each address type e.g. ipv4, ipv6
+		// For each addressType (ipv4, ipv6, etc.) reconcile the infra slices
 		for addressType := range serviceSupportedAddressesTypes {
 			existingSlices := slicesByAddressType[addressType]
-			err := c.reconcileByAddressType(service, tenantEpSlices, existingSlices, addressType)
-			if err != nil {
+			if err := c.reconcileByAddressType(service, tenantEpSlices, existingSlices, addressType); err != nil {
 				return err
 			}
 		}
 	}
 
-	// Delete the EndpointSlices that are no longer needed
+	/*
+		5) Perform the actual deletion of all slices we flagged.
+		   In many cases (serviceDeleted or .Spec.Selector != nil),
+		   we end up with only "delete" actions and no new slice creation.
+	*/
 	for _, eps := range slicesToDelete {
 		err := c.infraClient.DiscoveryV1().EndpointSlices(eps.Namespace).Delete(context.TODO(), eps.Name, metav1.DeleteOptions{})
 		if err != nil {
@@ -474,11 +538,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
 	// Create the desired port configuration
 	var desiredPorts []discovery.EndpointPort
 
-	for _, port := range service.Spec.Ports {
+	for i := range service.Spec.Ports {
 		desiredPorts = append(desiredPorts, discovery.EndpointPort{
-			Port:     &port.TargetPort.IntVal,
-			Protocol: &port.Protocol,
-			Name:     &port.Name,
+			Port:     &service.Spec.Ports[i].TargetPort.IntVal,
+			Protocol: &service.Spec.Ports[i].Protocol,
+			Name:     &service.Spec.Ports[i].Name,
 		})
 	}
 
@@ -588,55 +652,114 @@ func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
 	return false
 }
 
-func (c *Controller) finalize(service *v1.Service, slicesToCreate []*discovery.EndpointSlice, slicesToUpdate []*discovery.EndpointSlice, slicesToDelete []*discovery.EndpointSlice) error {
-	// If there are slices to delete and slices to create, make them as update
-	for i := 0; i < len(slicesToDelete); {
+func (c *Controller) finalize(
+	service *v1.Service,
+	slicesToCreate []*discovery.EndpointSlice,
+	slicesToUpdate []*discovery.EndpointSlice,
+	slicesToDelete []*discovery.EndpointSlice,
+) error {
+	/*
+		We try to turn a "delete + create" pair into a single "update" operation
+		if the original slice (slicesToDelete[i]) has the same address type as
+		the first slice in slicesToCreate, and is owned by the same Service.
+
+		However, we must re-check the lengths of slicesToDelete and slicesToCreate
+		within the loop to avoid an out-of-bounds index in slicesToCreate.
+	*/
+
+	i := 0
+	for i < len(slicesToDelete) {
+		// If there is nothing to create, break early
 		if len(slicesToCreate) == 0 {
 			break
 		}
-		if slicesToDelete[i].AddressType == slicesToCreate[0].AddressType && ownedBy(slicesToDelete[i], service) {
-			slicesToCreate[0].Name = slicesToDelete[i].Name
+
+		sd := slicesToDelete[i]
+		sc := slicesToCreate[0] // We can safely do this now, because len(slicesToCreate) > 0
+
+		// If the address type matches, and the slice is owned by the same Service,
+		// then instead of deleting sd and creating sc, we'll transform it into an update:
+		// we rename sc with sd's name, remove sd from the delete list, remove sc from the create list,
+		// and add sc to the update list.
+		if sd.AddressType == sc.AddressType && ownedBy(sd, service) {
+			sliceToUpdate := sc
+			sliceToUpdate.Name = sd.Name
+
+			// Remove the first element from slicesToCreate
 			slicesToCreate = slicesToCreate[1:]
-			slicesToUpdate = append(slicesToUpdate, slicesToCreate[0])
+
+			// Remove the slice from slicesToDelete
 			slicesToDelete = append(slicesToDelete[:i], slicesToDelete[i+1:]...)
+
+			// Now add the renamed slice to the list of slices we want to update
+			slicesToUpdate = append(slicesToUpdate, sliceToUpdate)
+
+			/*
+				Do not increment i here, because we've just removed an element from
+				slicesToDelete. The next slice to examine is now at the same index i.
+			*/
 		} else {
+			// If they don't match, move on to the next slice in slicesToDelete.
 			i++
 		}
 	}
 
-	// Create the new slices if service is not marked for deletion
+	/*
+		If the Service is not being deleted, create all remaining slices in slicesToCreate.
+		(If the Service has a DeletionTimestamp, it means it is going away, so we do not
+		want to create new EndpointSlices.)
+	*/
 	if service.DeletionTimestamp == nil {
 		for _, slice := range slicesToCreate {
-			createdSlice, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Create(context.TODO(), slice, metav1.CreateOptions{})
+			createdSlice, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Create(
+				context.TODO(),
+				slice,
+				metav1.CreateOptions{},
+			)
 			if err != nil {
-				klog.Errorf("Failed to create EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+				klog.Errorf("Failed to create EndpointSlice %s in namespace %s: %v",
+					slice.Name, slice.Namespace, err)
+				// If the namespace is terminating, it's safe to ignore the error.
 				if k8serrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
-					return nil
+					continue
 				}
 				return err
 			}
-			klog.Infof("Created EndpointSlice %s in namespace %s", createdSlice.Name, createdSlice.Namespace)
+			klog.Infof("Created EndpointSlice %s in namespace %s",
+				createdSlice.Name, createdSlice.Namespace)
 		}
 	}
 
-	// Update slices
+	// Update slices that are in the slicesToUpdate list.
 	for _, slice := range slicesToUpdate {
-		_, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Update(context.TODO(), slice, metav1.UpdateOptions{})
+		_, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Update(
+			context.TODO(),
+			slice,
+			metav1.UpdateOptions{},
+		)
 		if err != nil {
-			klog.Errorf("Failed to update EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+			klog.Errorf("Failed to update EndpointSlice %s in namespace %s: %v",
+				slice.Name, slice.Namespace, err)
 			return err
 		}
-		klog.Infof("Updated EndpointSlice %s in namespace %s", slice.Name, slice.Namespace)
+		klog.Infof("Updated EndpointSlice %s in namespace %s",
+			slice.Name, slice.Namespace)
 	}
 
-	// Delete slices
+	// Finally, delete slices that are in slicesToDelete and are no longer needed.
 	for _, slice := range slicesToDelete {
-		err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Delete(context.TODO(), slice.Name, metav1.DeleteOptions{})
+		err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Delete(
+			context.TODO(),
+			slice.Name,
+			metav1.DeleteOptions{},
+		)
 		if err != nil {
-			klog.Errorf("Failed to delete EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+			klog.Errorf("Failed to delete EndpointSlice %s in namespace %s: %v",
+				slice.Name, slice.Namespace, err)
 			return err
 		}
-		klog.Infof("Deleted EndpointSlice %s in namespace %s", slice.Name, slice.Namespace)
+		klog.Infof("Deleted EndpointSlice %s in namespace %s",
+			slice.Name, slice.Namespace)
 	}
 
 	return nil
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1fb86e25f..14d92d340 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -13,6 +13,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
 	dfake "k8s.io/client-go/dynamic/fake"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/testing"
@@ -189,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
 		}: "VirtualMachineInstanceList",
 	})
 
-	controller := NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, "test")
+	controller := NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, "test", "test-cluster")
 
 	err := controller.Init()
 	if err != nil {
@@ -686,5 +687,229 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
 			return false, err
 		}).Should(BeTrue(), "EndpointSlice in infra cluster should be recreated by the controller after deletion")
 	})
+
+	g.It("Should correctly handle multiple unique ports in EndpointSlice", func() {
+		// Create a VMI in the infra cluster
+		createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+		// Create an EndpointSlice in the tenant cluster
+		createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+			*createPort("http", 80, v1.ProtocolTCP),
+			[]discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+
+		// Define multiple ports for the Service
+		servicePorts := []v1.ServicePort{
+			{
+				Name:       "client",
+				Protocol:   v1.ProtocolTCP,
+				Port:       10001,
+				TargetPort: intstr.FromInt(30396),
+				NodePort:   30396,
+			},
+			{
+				Name:       "dashboard",
+				Protocol:   v1.ProtocolTCP,
+				Port:       8265,
+				TargetPort: intstr.FromInt(31003),
+				NodePort:   31003,
+			},
+			{
+				Name:       "metrics",
+				Protocol:   v1.ProtocolTCP,
+				Port:       8080,
+				TargetPort: intstr.FromInt(30452),
+				NodePort:   30452,
+			},
+		}
+
+		createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
+			servicePorts[0], v1.ServiceExternalTrafficPolicyLocal)
+
+		svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
+		Expect(err).To(BeNil())
+
+		svc.Spec.Ports = servicePorts
+		_, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
+		Expect(err).To(BeNil())
+
+		var epsListMultiPort *discoveryv1.EndpointSliceList
+
+		Eventually(func() (bool, error) {
+			epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+			if len(epsListMultiPort.Items) != 1 {
+				return false, err
+			}
+
+			createdSlice := epsListMultiPort.Items[0]
+			expectedPortNames := []string{"client", "dashboard", "metrics"}
+			foundPortNames := []string{}
+
+			for _, port := range createdSlice.Ports {
+				if port.Name != nil {
+					foundPortNames = append(foundPortNames, *port.Name)
+				}
+			}
+
+			if len(foundPortNames) != len(expectedPortNames) {
+				return false, err
+			}
+
+			portSet := sets.NewString(foundPortNames...)
+			expectedPortSet := sets.NewString(expectedPortNames...)
+			return portSet.Equal(expectedPortSet), err
+		}).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
+	})
+
+	g.It("Should not panic when Service changes to have a non-nil selector, causing EndpointSlice deletion with no new slices to create", func() {
+		createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+		createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+			*createPort("http", 80, v1.ProtocolTCP),
+			[]discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+		createAndAssertInfraServiceLB("infra-service-no-selector", "tenant-service-name", "test-cluster",
+			v1.ServicePort{
+				Name:       "web",
+				Port:       80,
+				NodePort:   31900,
+				Protocol:   v1.ProtocolTCP,
+				TargetPort: intstr.IntOrString{IntVal: 30390},
+			},
+			v1.ServiceExternalTrafficPolicyLocal,
+		)
+
+		// Wait for the controller to create an EndpointSlice in the infra cluster.
+		var epsList *discoveryv1.EndpointSliceList
+		var err error
+		Eventually(func() (bool, error) {
+			epsList, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			// Wait exactly 1 slice
+			if len(epsList.Items) == 1 {
+				return true, nil
+			}
+			return false, nil
+		}).Should(BeTrue(), "Controller should create an EndpointSlice in infra cluster for the LB service")
+
+		svcWithSelector, err := testVals.infraClient.CoreV1().Services(infraNamespace).
+			Get(context.TODO(), "infra-service-no-selector", metav1.GetOptions{})
+		Expect(err).To(BeNil())
+
+		// Let's set any selector to run the slice deletion logic
+		svcWithSelector.Spec.Selector = map[string]string{"test": "selector-added"}
+		_, err = testVals.infraClient.CoreV1().Services(infraNamespace).
+			Update(context.TODO(), svcWithSelector, metav1.UpdateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func() (bool, error) {
+			epsList, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			// We expect that after the update service.EndpointSlice will become 0
+			if len(epsList.Items) == 0 {
+				return true, nil
+			}
+			return false, nil
+		}).Should(BeTrue(), "Existing EndpointSlice should be removed because Service now has a selector")
+	})
+
+	g.It("Should remove EndpointSlices and not recreate them when a previously no-selector Service obtains a selector", func() {
+		testVals.infraClient.Fake.PrependReactor("create", "endpointslices", func(action testing.Action) (bool, runtime.Object, error) {
+			createAction := action.(testing.CreateAction)
+			slice := createAction.GetObject().(*discoveryv1.EndpointSlice)
+			if slice.Name == "" && slice.GenerateName != "" {
+				slice.Name = slice.GenerateName + "-fake001"
+			}
+			return false, slice, nil
+		})
+
+		createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+		createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+			*createPort("http", 80, v1.ProtocolTCP),
+			[]discoveryv1.Endpoint{
+				*createEndpoint("123.45.67.89", "worker-0-test", true, true, false),
+			},
+		)
+
+		noSelectorSvcName := "svc-without-selector"
+		svc := &v1.Service{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      noSelectorSvcName,
+				Namespace: infraNamespace,
+				Labels: map[string]string{
+					kubevirt.TenantServiceNameLabelKey:      "tenant-service-name",
+					kubevirt.TenantServiceNamespaceLabelKey: tenantNamespace,
+					kubevirt.TenantClusterNameLabelKey:      "test-cluster",
+				},
+			},
+			Spec: v1.ServiceSpec{
+				Ports: []v1.ServicePort{
+					{
+						Name:       "web",
+						Port:       80,
+						NodePort:   31900,
+						Protocol:   v1.ProtocolTCP,
+						TargetPort: intstr.IntOrString{IntVal: 30390},
+					},
+				},
+				Type:                  v1.ServiceTypeLoadBalancer,
+				ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
+			},
+		}
+
+		_, err := testVals.infraClient.CoreV1().Services(infraNamespace).Create(context.TODO(), svc, metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func() (bool, error) {
+			epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			return len(epsList.Items) == 1, nil
+		}).Should(BeTrue(), "Controller should create an EndpointSlice in infra cluster for the no-selector LB service")
+
+		svcWithSelector, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(
+			context.TODO(), noSelectorSvcName, metav1.GetOptions{})
+		Expect(err).To(BeNil())
+
+		svcWithSelector.Spec.Selector = map[string]string{"app": "test-value"}
+		_, err = testVals.infraClient.CoreV1().Services(infraNamespace).
+			Update(context.TODO(), svcWithSelector, metav1.UpdateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func() (bool, error) {
+			epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			return len(epsList.Items) == 0, nil
+		}).Should(BeTrue(), "All EndpointSlices should be removed after Service acquires a selector (no new slices created)")
+	})
+
+	g.It("Should ignore Services from a different cluster", func() {
+		// Create a Service with cluster label "other-cluster"
+		svc := createInfraServiceLB("infra-service-conflict", "tenant-service-name", "other-cluster",
+			v1.ServicePort{Name: "web", Port: 80, NodePort: 31900, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{IntVal: 30390}},
+			v1.ServiceExternalTrafficPolicyLocal)
+		_, err := testVals.infraClient.CoreV1().Services(infraNamespace).Create(context.TODO(), svc, metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		// The controller should ignore this Service, so no EndpointSlice should be created.
+		Eventually(func() (bool, error) {
+			epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			// Expect zero slices since cluster label does not match "test-cluster"
+			return len(epsList.Items) == 0, nil
+		}).Should(BeTrue(), "Services with a different cluster label should be ignored")
+	})
+
 	})
 })
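The consolidated patch threads a clusterName through the controller so several tenant clusters can share one infra namespace without claiming each other's Services and EndpointSlices, and it reworks finalize() so that coalescing a delete+create pair into an update can no longer index an empty create list. A minimal sketch of that pairing loop (string slices stand in for EndpointSlices; the real code also checks AddressType and ownership before coalescing):

package main

import "fmt"

// pair turns matching delete+create pairs into updates, re-checking
// len(toCreate) on every iteration so toCreate[0] is never read when the
// list is empty -- the out-of-bounds case the rework guards against.
func pair(toDelete, toCreate []string) (updates []string) {
	i := 0
	for i < len(toDelete) {
		if len(toCreate) == 0 {
			break // nothing left to pair; remaining deletes stay deletes
		}
		// assume every pair matches for this sketch
		updates = append(updates, toCreate[0])
		toCreate = toCreate[1:]
		toDelete = append(toDelete[:i], toDelete[i+1:]...)
		// i is not advanced: the next candidate shifted into index i
	}
	return updates
}

func main() {
	fmt.Println(pair([]string{"a", "b", "c"}, []string{"x"})) // [x]
}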
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.14.1@sha256:4b84a077e7f1b75bdf8b272c8f147e4ef3b67b9bea83383a399e9149868384ac
+ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.15.2@sha256:cb4ab74099662f73e058f7c7495fb403488622c3425c06ad23b687bfa8bc805b
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:91ec9c31472f8e94ae5f6f5a2568058eb28b3f57ab7e203d8d4a0993911fffc3
+ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:bc08ea0ced2cb7dd98b26d72a9462fc0a3863adb908a5effbfcdf7227656ea65
packages/apps/kubernetes/templates/_resources.tpl (new file, 50 lines)
@@ -0,0 +1,50 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
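The helper is consumed with include plus a dict carrying the preset name, as the kamaji template below does. A hypothetical call and its approximate rendering (key order comes from toYaml and may differ):

resources: {{- include "resources.preset" (dict "type" "small") | nindent 4 }}

# renders roughly as:
resources:
  limits:
    cpu: 750m
    ephemeral-storage: 2Gi
    memory: 768Mi
  requests:
    cpu: 500m
    ephemeral-storage: 50Mi
    memory: 512Mi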
@@ -26,10 +26,23 @@ spec:
       containers:
       - image: "{{ $.Files.Get "images/cluster-autoscaler.tag" | trim }}"
         name: cluster-autoscaler
+        resources:
+          limits:
+            cpu: 512m
+            memory: 512Mi
+          requests:
+            cpu: 125m
+            memory: 128Mi
         command:
         - /cluster-autoscaler
         args:
         - --cloud-provider=clusterapi
+        - --enforce-node-group-min-size=true
+        - --ignore-daemonsets-utilization=true
+        - --ignore-mirror-pods-utilization=true
+        - --scale-down-unneeded-time=30s
+        - --scan-interval=25s
+        - --force-delete-unregistered-nodes=true
         - --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
         - --clusterapi-cloud-config-authoritative
         - --node-group-auto-discovery=clusterapi:namespace={{ .Release.Namespace }},clusterName={{ .Release.Name }}
@@ -29,6 +29,7 @@ spec:
 {{- range .group.roles }}
         node-role.kubernetes.io/{{ . }}: ""
 {{- end }}
+        cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ .groupName }}
     spec:
       domain:
 {{- if and .group.resources .group.resources.cpu }}
@@ -101,12 +102,37 @@ metadata:
   annotations:
     kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
 spec:
+  apiServer:
+    {{- if .Values.kamajiControlPlane.apiServer.resources }}
+    resources: {{- toYaml .Values.kamajiControlPlane.apiServer.resources | nindent 6 }}
+    {{- else if ne .Values.kamajiControlPlane.apiServer.resourcesPreset "none" }}
+    resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
+    {{- end }}
+  controllerManager:
+    {{- if .Values.kamajiControlPlane.controllerManager.resources }}
+    resources: {{- toYaml .Values.kamajiControlPlane.controllerManager.resources | nindent 6 }}
+    {{- else if ne .Values.kamajiControlPlane.controllerManager.resourcesPreset "none" }}
+    resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
+    {{- end }}
+  scheduler:
+    {{- if .Values.kamajiControlPlane.scheduler.resources }}
+    resources: {{- toYaml .Values.kamajiControlPlane.scheduler.resources | nindent 6 }}
+    {{- else if ne .Values.kamajiControlPlane.scheduler.resourcesPreset "none" }}
+    resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
+    {{- end }}
   dataStoreName: "{{ $etcd }}"
   addons:
     coreDNS:
       dnsServiceIPs:
       - 10.95.0.10
-    konnectivity: {}
+    konnectivity:
+      server:
+        port: 8132
+        {{- if .Values.kamajiControlPlane.addons.konnectivity.server.resources }}
+        resources: {{- toYaml .Values.kamajiControlPlane.addons.konnectivity.server.resources | nindent 10 }}
+        {{- else if ne .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "none" }}
+        resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
+        {{- end }}
   kubelet:
     cgroupfs: systemd
     preferredAddressTypes:
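The template reads per-component resources and resourcesPreset knobs from values; a hypothetical values.yaml excerpt matching the field paths above (preset names here are illustrative, not the chart's actual defaults; "none" disables injection and an explicit resources block wins over the preset):

kamajiControlPlane:
  apiServer:
    resourcesPreset: small   # illustrative default
    resources: {}            # set to override the preset entirely
  controllerManager:
    resourcesPreset: micro
  scheduler:
    resourcesPreset: micro
  addons:
    konnectivity:
      server:
        resourcesPreset: micro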
@@ -117,7 +143,7 @@ spec:
   ingress:
     extraAnnotations:
       nginx.ingress.kubernetes.io/ssl-passthrough: "true"
-    hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
+    hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}
     className: "{{ $ingress }}"
   deployment:
     podAdditionalMetadata:
@@ -126,6 +152,21 @@ spec:
     replicas: 2
     version: 1.30.1
 ---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ .Release.Name }}
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: 2
+  minReplicas: 1
+  kind: kubernetes
+  type: control-plane
+  selector:
+    kamaji.clastix.io/component: deployment
+    kamaji.clastix.io/name: {{ .Release.Name }}
+  version: {{ $.Chart.Version }}
+---
 apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
 kind: KubevirtCluster
 metadata:
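For a release hypothetically named my-cluster in namespace tenant-foo with chart version 0.1.0, the new control-plane WorkloadMonitor renders roughly as:

apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: my-cluster
  namespace: tenant-foo
spec:
  replicas: 2
  minReplicas: 1
  kind: kubernetes
  type: control-plane
  selector:
    kamaji.clastix.io/component: deployment
    kamaji.clastix.io/name: my-cluster
  version: 0.1.0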
@@ -172,6 +213,7 @@ spec:
 ---
 {{- $context := deepCopy $ }}
 {{- $_ := set $context "group" $group }}
+{{- $_ := set $context "groupName" $groupName }}
 {{- $kubevirtmachinetemplate := include "kubevirtmachinetemplate" $context }}
 {{- $kubevirtmachinetemplateHash := $kubevirtmachinetemplate | sha256sum | trunc 6 }}
 {{- $kubevirtmachinetemplateName := printf "%s-%s-%s" $.Release.Name $groupName $kubevirtmachinetemplateHash }}
@@ -233,7 +275,7 @@ spec:
     apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
     kind: KubevirtMachineTemplate
     name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
-    namespace: default
+    namespace: {{ $.Release.Namespace }}
   version: v1.30.1
 ---
 apiVersion: cluster.x-k8s.io/v1beta1
@@ -255,6 +297,21 @@ spec:
       - type: Ready
         status: "False"
         timeout: 300s
+---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}-{{ $groupName }}
+  namespace: {{ $.Release.Namespace }}
+spec:
+  minReplicas: {{ $group.minReplicas }}
+  kind: kubernetes
+  type: worker
+  selector:
+    cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
+    cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ $groupName }}
+    cluster.x-k8s.io/role: worker
+  version: {{ $.Chart.Version }}
 {{- end }}
 ---
 {{- /*
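The per-group monitor takes minReplicas straight from the node-group definition, so the values driving it look roughly like this (only minReplicas is referenced by the hunk above; any sibling fields are placeholders):

nodeGroups:
  md0:
    minReplicas: 1
    # other per-group settings (instance sizing, replica ceilings, ...)
    # live alongside it in the same map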

@@ -63,11 +63,21 @@ spec:
             mountPath: /etc/kubernetes/kubeconfig
             readOnly: true
           resources:
+            limits:
+              cpu: 512m
+              memory: 512Mi
             requests:
-              memory: 50Mi
-              cpu: 10m
+              cpu: 125m
+              memory: 128Mi
         - name: csi-provisioner
           image: quay.io/openshift/origin-csi-external-provisioner:latest
+          resources:
+            limits:
+              cpu: 512m
+              memory: 512Mi
+            requests:
+              cpu: 125m
+              memory: 128Mi
           args:
             - "--csi-address=$(ADDRESS)"
             - "--default-fstype=ext4"
@@ -102,9 +112,12 @@ spec:
             mountPath: /etc/kubernetes/kubeconfig
             readOnly: true
           resources:
+            limits:
+              cpu: 512m
+              memory: 512Mi
             requests:
-              memory: 50Mi
-              cpu: 10m
+              cpu: 125m
+              memory: 128Mi
         - name: csi-liveness-probe
           image: quay.io/openshift/origin-csi-livenessprobe:latest
           args:
@@ -115,9 +128,12 @@ spec:
           - name: socket-dir
             mountPath: /csi
           resources:
+            limits:
+              cpu: 512m
+              memory: 512Mi
             requests:
-              memory: 50Mi
-              cpu: 10m
+              cpu: 125m
+              memory: 128Mi
       volumes:
         - name: socket-dir
           emptyDir: {}
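All three sidecars now carry the same stanza, with requests strictly below limits, so the pods stay in the Burstable QoS class (Guaranteed would require requests equal to limits) while the new limits cap runaway CPU and memory use. The effective block as rendered:

resources:
  limits:
    cpu: 512m
    memory: 512Mi
  requests:
    cpu: 125m
    memory: 128Mi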

@@ -24,3 +24,13 @@ rules:
     resourceNames:
       - {{ .Release.Name }}
     verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - cozystack.io
+    resources:
+      - workloadmonitors
+    resourceNames:
+      - {{ .Release.Name }}
+      {{- range $groupName, $group := .Values.nodeGroups }}
+      - {{ $.Release.Name }}-{{ $groupName }}
+      {{- end }}
+    verbs: ["get", "list", "watch"]
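With a hypothetical release my-cluster defining node groups md0 and md1, the appended rule renders to:

- apiGroups:
    - cozystack.io
  resources:
    - workloadmonitors
  resourceNames:
    - my-cluster
    - my-cluster-md0
    - my-cluster-md1
  verbs: ["get", "list", "watch"]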

@@ -18,7 +18,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-cert-manager-crds
   storageNamespace: cozy-cert-manager-crds
   install:
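Each HelmRelease below is repointed the same way: rather than consuming the whole <release>-kubeconfig secret, it now reads the super-admin.svc key from <release>-admin-kubeconfig. The referenced secret is shaped roughly like this (presumably maintained by Kamaji for the tenant control plane; contents and sibling keys elided):

apiVersion: v1
kind: Secret
metadata:
  name: my-cluster-admin-kubeconfig   # my-cluster = release name, illustrative
type: Opaque
data:
  super-admin.svc: <base64-encoded kubeconfig>   # the key referenced above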

@@ -19,7 +19,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-cert-manager
   storageNamespace: cozy-cert-manager
   install:

@@ -18,7 +18,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-cilium
   storageNamespace: cozy-cilium
   install:

@@ -18,7 +18,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-csi
   storageNamespace: cozy-csi
   install:

@@ -19,7 +19,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-fluxcd
   storageNamespace: cozy-fluxcd
   install:

@@ -19,7 +19,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-ingress-nginx
   storageNamespace: cozy-ingress-nginx
   install:

@@ -21,7 +21,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-monitoring-agents
   storageNamespace: cozy-monitoring-agents
   install:
@@ -48,7 +49,6 @@ spec:
         tenant: {{ .Release.Namespace }}
       remoteWrite:
         url: http://vminsert-shortterm.{{ $targetTenant }}.svc:8480/insert/0/prometheus
-
     fluent-bit:
       readinessProbe:
         httpGet:

@@ -19,7 +19,8 @@ spec:
       namespace: cozy-system
   kubeConfig:
     secretRef:
-      name: {{ .Release.Name }}-kubeconfig
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
   targetNamespace: cozy-victoria-metrics-operator
   storageNamespace: cozy-victoria-metrics-operator
   install:
Some files were not shown because too many files have changed in this diff.